https://github.com/AZero13 updated https://github.com/llvm/llvm-project/pull/144380

From 9d085f7c9ad46eb3ad26e5bd06435532be72c50d Mon Sep 17 00:00:00 2001
From: AZero13 <[email protected]>
Date: Thu, 22 Jan 2026 10:31:32 -0500
Subject: [PATCH 1/2] Pre-commit tests (NFC)

---
 .../AArch64/combine-comparisons-by-cse.ll     | 669 ++++++++++++++++++
 1 file changed, 669 insertions(+)

diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
index 4449c2b9193a4..80eab2749ce10 100644
--- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -949,6 +949,675 @@ return:                                           ; preds = %if.end, %land.lhs.t
   ret i32 %retval.0
 }
 
+; (a > 10 && b == c) || (a >= 10 && b == d)
+define i32 @combine_ugt_uge_10() #0 {
+; CHECK-LABEL: combine_ugt_uge_10:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w9, [x8]
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    cmp w9, #11
+; CHECK-NEXT:    b.lo .LBB12_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w10, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w10, w9
+; CHECK-NEXT:    b.ne .LBB12_4
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB12_3: // %lor.lhs.false
+; CHECK-NEXT:    cmp w9, #10
+; CHECK-NEXT:    b.lo .LBB12_6
+; CHECK-NEXT:  .LBB12_4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB12_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB12_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ugt i32 %0, 10
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %land.lhs.true3
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ugt i32 %0, 9
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false, %land.lhs.true
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; (a > 5 && b == c) || (a < 5 && b == d)
+define i32 @combine_ugt_ult_5() #0 {
+; CHECK-LABEL: combine_ugt_ult_5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #6
+; CHECK-NEXT:    b.lo .LBB13_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB13_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB13_3: // %lor.lhs.false
+; CHECK-NEXT:    cmp w8, #4
+; CHECK-NEXT:    b.hi .LBB13_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB13_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB13_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ugt i32 %0, 5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %if.end
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ult i32 %0, 5
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false, %land.lhs.true
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; (a < 5 && b == c) || (a <= 5 && b == d)
+define i32 @combine_ult_uge_5() #0 {
+; CHECK-LABEL: combine_ult_uge_5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w9, [x8]
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    cmp w9, #4
+; CHECK-NEXT:    b.hi .LBB14_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w10, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w10, w9
+; CHECK-NEXT:    b.ne .LBB14_4
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB14_3: // %lor.lhs.false
+; CHECK-NEXT:    cmp w9, #5
+; CHECK-NEXT:    b.hi .LBB14_6
+; CHECK-NEXT:  .LBB14_4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB14_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB14_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ult i32 %0, 5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %land.lhs.true3
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ult i32 %0, 6
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false, %land.lhs.true
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; (a < 5 && b == c) || (a > 5 && b == d)
+define i32 @combine_ult_ugt_5() #0 {
+; CHECK-LABEL: combine_ult_ugt_5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #4
+; CHECK-NEXT:    b.hi .LBB15_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB15_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB15_3: // %lor.lhs.false
+; CHECK-NEXT:    cmp w8, #6
+; CHECK-NEXT:    b.lo .LBB15_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB15_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB15_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ult i32 %0, 5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %if.end
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ugt i32 %0, 5
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false, %land.lhs.true
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; (a > -5 && b == c) || (a < -5 && b == d)
+define i32 @combine_ugt_ult_n5() #0 {
+; CHECK-LABEL: combine_ugt_ult_n5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmn w8, #4
+; CHECK-NEXT:    b.lo .LBB16_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB16_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB16_3: // %lor.lhs.false
+; CHECK-NEXT:    cmn w8, #6
+; CHECK-NEXT:    b.hi .LBB16_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB16_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB16_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ugt i32 %0, -5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %if.end
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ult i32 %0, -5
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false, %land.lhs.true
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; (a < -5 && b == c) || (a > -5 && b == d)
+define i32 @combine_ult_ugt_n5() #0 {
+; CHECK-LABEL: combine_ult_ugt_n5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmn w8, #6
+; CHECK-NEXT:    b.hi .LBB17_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB17_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB17_3: // %lor.lhs.false
+; CHECK-NEXT:    cmn w8, #4
+; CHECK-NEXT:    b.lo .LBB17_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB17_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB17_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ult i32 %0, -5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %if.end
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ugt i32 %0, -5
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false, %land.lhs.true
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; Yes, you can mix them too!
+; (a < -5 && b == c) || (a u> -5 && b == d)
+define i32 @combine_ult_gt_n5() #0 {
+; CHECK-LABEL: combine_ult_gt_n5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmn w8, #6
+; CHECK-NEXT:    b.hi .LBB18_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB18_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB18_3: // %lor.lhs.false
+; CHECK-NEXT:    cmn w8, #4
+; CHECK-NEXT:    b.lt .LBB18_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB18_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB18_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ult i32 %0, -5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %if.end
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp sgt i32 %0, -5
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false, %land.lhs.true
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; Test that in the following case, we don't hit 'cmp' and trigger a false positive
+; cmp  w19, #0
+; cinc w0, w19, gt
+; ...
+; fcmp d8, #0.0
+; b.gt .LBB0_5
+
+define i32 @fcmpri_u(i32 %argc, ptr nocapture readonly %argv) #0 {
+; CHECK-LABEL: fcmpri_u:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, #2
+; CHECK-NEXT:    b.lo .LBB19_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    ldr x8, [x1, #8]
+; CHECK-NEXT:    cbz x8, .LBB19_3
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #3 // =0x3
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB19_3: // %if.end
+; CHECK-NEXT:    str d8, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    stp x30, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    .cfi_offset b8, -32
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    bl zoo
+; CHECK-NEXT:    mov w19, w0
+; CHECK-NEXT:    mov w0, #-1 // =0xffffffff
+; CHECK-NEXT:    bl yoo
+; CHECK-NEXT:    cmp w19, #0
+; CHECK-NEXT:    mov w1, #2 // =0x2
+; CHECK-NEXT:    fmov d8, d0
+; CHECK-NEXT:    cinc w0, w19, ne
+; CHECK-NEXT:    bl xoo
+; CHECK-NEXT:    fmov d0, #-1.00000000
+; CHECK-NEXT:    fcmp d8, #0.0
+; CHECK-NEXT:    fmov d1, #-2.00000000
+; CHECK-NEXT:    fadd d0, d8, d0
+; CHECK-NEXT:    fcsel d0, d8, d0, gt
+; CHECK-NEXT:    bl woo
+; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    mov w0, #4 // =0x4
+; CHECK-NEXT:    ldr d8, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
+; CHECK-NEXT:    .cfi_restore w19
+; CHECK-NEXT:    .cfi_restore w30
+; CHECK-NEXT:    .cfi_restore b8
+; CHECK-NEXT:    ret
+
+; CHECK-LABEL-DAG: .LBB9_3
+
+entry:
+  %cmp = icmp ugt i32 %argc, 1
+  br i1 %cmp, label %land.lhs.true, label %if.end
+
+land.lhs.true:                                    ; preds = %entry
+  %arrayidx = getelementptr inbounds ptr, ptr %argv, i64 1
+  %0 = load ptr, ptr %arrayidx, align 8
+  %cmp1 = icmp eq ptr %0, null
+  br i1 %cmp1, label %if.end, label %return
+
+if.end:                                           ; preds = %land.lhs.true, %entry
+  %call = call i32 @zoo(i32 1)
+  %call2 = call double @yoo(i32 -1)
+  %cmp4 = icmp ugt i32 %call, 0
+  %add = zext i1 %cmp4 to i32
+  %cond = add nuw i32 %add, %call
+  %call7 = call i32 @xoo(i32 %cond, i32 2)
+  %cmp9 = fcmp ogt double %call2, 0.000000e+00
+  br i1 %cmp9, label %cond.end14, label %cond.false12
+
+cond.false12:                                     ; preds = %if.end
+  %sub = fadd fast double %call2, -1.000000e+00
+  br label %cond.end14
+
+cond.end14:                                       ; preds = %if.end, %cond.false12
+  %cond15 = phi double [ %sub, %cond.false12 ], [ %call2, %if.end ]
+  %call16 = call i32 @woo(double %cond15, double -2.000000e+00)
+  br label %return
+
+return:                                           ; preds = %land.lhs.true, %cond.end14
+  %retval.0 = phi i32 [ 4, %cond.end14 ], [ 3, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+define void @cmp_shifted_unsigned(i32 %in, i32 %lhs, i32 %rhs) #0 {
+; CHECK-LABEL: cmp_shifted_unsigned:
+; CHECK:       // %bb.0: // %common.ret
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    lsr w9, w0, #13
+; CHECK-NEXT:    mov w8, #42 // =0x2a
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    csinc w8, w8, wzr, ne
+; CHECK-NEXT:    cmp w9, #0
+; CHECK-NEXT:    mov w9, #128 // =0x80
+; CHECK-NEXT:    csel w0, w9, w8, ne
+; CHECK-NEXT:    bl zoo
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
+; CHECK-NEXT:    .cfi_restore w30
+; CHECK-NEXT:    ret
+; [...]
+
+  %tst_low = icmp ugt i32 %in, 8191
+  br i1 %tst_low, label %true, label %false
+
+true:
+  call i32 @zoo(i32 128)
+  ret void
+
+false:
+  %tst = icmp ugt i32 %in, 0
+  br i1 %tst, label %truer, label %falser
+
+truer:
+  call i32 @zoo(i32 42)
+  ret void
+
+falser:
+  call i32 @zoo(i32 1)
+  ret void
+}
+
+define i32 @combine_ugt_uge_sel(i64 %v, ptr %p) #0 {
+; CHECK-LABEL: combine_ugt_uge_sel:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w9, [x8]
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    cmp w9, #0
+; CHECK-NEXT:    csel x10, x0, xzr, ne
+; CHECK-NEXT:    str x10, [x1]
+; CHECK-NEXT:    cbz w9, .LBB21_2
+; CHECK-NEXT:  // %bb.1: // %lor.lhs.false
+; CHECK-NEXT:    cmp w9, #2
+; CHECK-NEXT:    b.hs .LBB21_4
+; CHECK-NEXT:    b .LBB21_6
+; CHECK-NEXT:  .LBB21_2: // %land.lhs.true
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w10, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w10, w9
+; CHECK-NEXT:    b.ne .LBB21_4
+; CHECK-NEXT:  // %bb.3:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB21_4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB21_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB21_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ugt i32 %0, 0
+  %m = select i1 %cmp, i64 %v, i64 0
+  store i64 %m, ptr %p
+  br i1 %cmp, label %lor.lhs.false, label %land.lhs.true
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %land.lhs.true3
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ugt i32 %0, 1
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false, %land.lhs.true
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
 declare i32 @zoo(i32)
 
 declare double @yoo(i32)

From 6f100360ec89c55165b432d0070182e5004155dc Mon Sep 17 00:00:00 2001
From: AZero13 <[email protected]>
Date: Thu, 22 Jan 2026 10:31:42 -0500
Subject: [PATCH 2/2] [AArch64] Add support for unsigned comparisons

We have to be extra careful not to allow unsigned wraps, however. This
also required adjusting the logic in adjustCmp, as well as comparing the
true immediate value with the add or sub taken into account.
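
For example, the wrap guard this adds to adjustCmp (shown here as a
sketch; IsSigned, Opc, OldOpc, OldImm, and Cmp are the names used in the
patch below):

  // If representing the adjusted immediate would require flipping
  // CMP <-> CMN (i.e. the immediate crossed zero), an unsigned compare
  // has wrapped, so keep the original compare unchanged.
  if (!IsSigned && Opc != OldOpc)
    return CmpInfo(OldImm, OldOpc, Cmp);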

Because SIGNED_MIN and SIGNED_MAX cannot be encoded as immediates, we do
not need to worry about those edge cases when dealing with unsigned
comparisons.
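
As a concrete before/after, taken from the combine_ugt_uge_10 test
updated below (HI/HS and LO/LS are the unsigned exclusive/inclusive
condition-code pairs):

  // Before: two compares against adjacent immediates.
  cmp w9, #11
  b.lo .LBB12_3    // taken if a u< 11, i.e. !(a u> 10)
  ...
  cmp w9, #10
  b.lo .LBB12_6    // taken if a u< 10, i.e. !(a u>= 10)

  // After: "a u>= 11" is rewritten as the equivalent "a u> 10", so the
  // second block can reuse the flags and its own cmp is deleted.
  cmp w8, #10
  b.ls .LBB12_3    // taken if a u<= 10, i.e. !(a u> 10)
  ...
  b.lo .LBB12_6    // same flags: taken if a u< 10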
---
 .../StaticAnalyzer/Checkers/ValistChecker.cpp | 419 ++++++++++++++++++
 flang/include/flang/Lower/Cuda.h              |  74 ++++
 .../AArch64/AArch64ConditionOptimizer.cpp     |  84 +++-
 .../AArch64/combine-comparisons-by-cse.ll     |  49 +-
 4 files changed, 579 insertions(+), 47 deletions(-)
 create mode 100644 clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
 create mode 100644 flang/include/flang/Lower/Cuda.h

diff --git a/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
new file mode 100644
index 0000000000000..503fa5de868f2
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
@@ -0,0 +1,419 @@
+//== VAListChecker.cpp - stdarg.h macro usage checker -----------*- C++ -*--==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines checkers which detect usage of uninitialized va_list values
+// and va_start calls with no matching va_end.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/Support/FormatVariadic.h"
+
+using namespace clang;
+using namespace ento;
+using llvm::formatv;
+
+namespace {
+enum class VAListState {
+  Uninitialized,
+  Unknown,
+  Initialized,
+  Released,
+};
+
+constexpr llvm::StringLiteral StateNames[] = {
+    "uninitialized", "unknown", "initialized", "already released"};
+} // end anonymous namespace
+
+static StringRef describeState(const VAListState S) {
+  return StateNames[static_cast<int>(S)];
+}
+
+REGISTER_MAP_WITH_PROGRAMSTATE(VAListStateMap, const MemRegion *, VAListState)
+
+static VAListState getVAListState(ProgramStateRef State, const MemRegion *Reg) {
+  if (const VAListState *Res = State->get<VAListStateMap>(Reg))
+    return *Res;
+  return Reg->getSymbolicBase() ? VAListState::Unknown
+                                : VAListState::Uninitialized;
+}
+
+namespace {
+typedef SmallVector<const MemRegion *, 2> RegionVector;
+
+class VAListChecker : public Checker<check::PreCall, check::PreStmt<VAArgExpr>,
+                                     check::DeadSymbols> {
+  const BugType LeakBug{this, "Leaked va_list", categories::MemoryError,
+                        /*SuppressOnSink=*/true};
+  const BugType UninitAccessBug{this, "Uninitialized va_list",
+                                categories::MemoryError};
+
+  struct VAListAccepter {
+    CallDescription Func;
+    int ParamIndex;
+  };
+  static const SmallVector<VAListAccepter, 15> VAListAccepters;
+  static const CallDescription VaStart, VaEnd, VaCopy;
+
+public:
+  void checkPreStmt(const VAArgExpr *VAA, CheckerContext &C) const;
+  void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+  void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+
+private:
+  const MemRegion *getVAListAsRegion(SVal SV, const Expr *VAExpr,
+                                     CheckerContext &C) const;
+  const ExplodedNode *getStartCallSite(const ExplodedNode *N,
+                                       const MemRegion *Reg) const;
+
+  void reportUninitializedAccess(const MemRegion *VAList, StringRef Msg,
+                                 CheckerContext &C) const;
+  void reportLeaked(const RegionVector &Leaked, StringRef Msg1, StringRef Msg2,
+                    CheckerContext &C, ExplodedNode *N) const;
+
+  void checkVAListStartCall(const CallEvent &Call, CheckerContext &C) const;
+  void checkVAListCopyCall(const CallEvent &Call, CheckerContext &C) const;
+  void checkVAListEndCall(const CallEvent &Call, CheckerContext &C) const;
+
+  class VAListBugVisitor : public BugReporterVisitor {
+  public:
+    VAListBugVisitor(const MemRegion *Reg, bool IsLeak = false)
+        : Reg(Reg), IsLeak(IsLeak) {}
+    void Profile(llvm::FoldingSetNodeID &ID) const override {
+      static int X = 0;
+      ID.AddPointer(&X);
+      ID.AddPointer(Reg);
+    }
+    PathDiagnosticPieceRef getEndPath(BugReporterContext &BRC,
+                                      const ExplodedNode *EndPathNode,
+                                      PathSensitiveBugReport &BR) override {
+      if (!IsLeak)
+        return nullptr;
+
+      PathDiagnosticLocation L = BR.getLocation();
+      // Do not add the statement itself as a range in case of leak.
+      return std::make_shared<PathDiagnosticEventPiece>(L, BR.getDescription(),
+                                                        false);
+    }
+    PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
+                                     BugReporterContext &BRC,
+                                     PathSensitiveBugReport &BR) override;
+
+  private:
+    const MemRegion *Reg;
+    bool IsLeak;
+  };
+};
+
+const SmallVector<VAListChecker::VAListAccepter, 15>
+    VAListChecker::VAListAccepters = {{{CDM::CLibrary, {"vfprintf"}, 3}, 2},
+                                      {{CDM::CLibrary, {"vfscanf"}, 3}, 2},
+                                      {{CDM::CLibrary, {"vprintf"}, 2}, 1},
+                                      {{CDM::CLibrary, {"vscanf"}, 2}, 1},
+                                      {{CDM::CLibrary, {"vsnprintf"}, 4}, 3},
+                                      {{CDM::CLibrary, {"vsprintf"}, 3}, 2},
+                                      {{CDM::CLibrary, {"vsscanf"}, 3}, 2},
+                                      {{CDM::CLibrary, {"vfwprintf"}, 3}, 2},
+                                      {{CDM::CLibrary, {"vfwscanf"}, 3}, 2},
+                                      {{CDM::CLibrary, {"vwprintf"}, 2}, 1},
+                                      {{CDM::CLibrary, {"vwscanf"}, 2}, 1},
+                                      {{CDM::CLibrary, {"vswprintf"}, 4}, 3},
+                                      // vswprintf is the wide version of
+                                      // vsnprintf, vsprintf has no wide version
+                                      {{CDM::CLibrary, {"vswscanf"}, 3}, 2}};
+
+const CallDescription VAListChecker::VaStart(CDM::CLibrary,
+                                             {"__builtin_va_start"}, /*Args=*/2,
+                                             /*Params=*/1),
+    VAListChecker::VaCopy(CDM::CLibrary, {"__builtin_va_copy"}, 2),
+    VAListChecker::VaEnd(CDM::CLibrary, {"__builtin_va_end"}, 1);
+} // end anonymous namespace
+
+void VAListChecker::checkPreCall(const CallEvent &Call,
+                                 CheckerContext &C) const {
+  if (VaStart.matches(Call))
+    checkVAListStartCall(Call, C);
+  else if (VaCopy.matches(Call))
+    checkVAListCopyCall(Call, C);
+  else if (VaEnd.matches(Call))
+    checkVAListEndCall(Call, C);
+  else {
+    for (const auto &FuncInfo : VAListAccepters) {
+      if (!FuncInfo.Func.matches(Call))
+        continue;
+      const MemRegion *VAList =
+          getVAListAsRegion(Call.getArgSVal(FuncInfo.ParamIndex),
+                            Call.getArgExpr(FuncInfo.ParamIndex), C);
+      if (!VAList)
+        return;
+      VAListState S = getVAListState(C.getState(), VAList);
+
+      if (S == VAListState::Initialized || S == VAListState::Unknown)
+        return;
+
+      std::string ErrMsg =
+          formatv("Function '{0}' is called with an {1} va_list argument",
+                  FuncInfo.Func.getFunctionName(), describeState(S));
+      reportUninitializedAccess(VAList, ErrMsg, C);
+      break;
+    }
+  }
+}
+
+const MemRegion *VAListChecker::getVAListAsRegion(SVal SV, const Expr *E,
+                                                  CheckerContext &C) const {
+  const MemRegion *Reg = SV.getAsRegion();
+  if (!Reg)
+    return nullptr;
+  // TODO: In the future this should be abstracted away by the analyzer.
+  bool VAListModelledAsArray = false;
+  if (const auto *Cast = dyn_cast<CastExpr>(E)) {
+    QualType Ty = Cast->getType();
+    VAListModelledAsArray =
+        Ty->isPointerType() && Ty->getPointeeType()->isRecordType();
+  }
+  if (const auto *DeclReg = Reg->getAs<DeclRegion>()) {
+    if (isa<ParmVarDecl>(DeclReg->getDecl()))
+      Reg = C.getState()->getSVal(SV.castAs<Loc>()).getAsRegion();
+  }
+  // Some VarRegion based VA lists reach here as ElementRegions.
+  const auto *EReg = dyn_cast_or_null<ElementRegion>(Reg);
+  return (EReg && VAListModelledAsArray) ? EReg->getSuperRegion() : Reg;
+}
+
+void VAListChecker::checkPreStmt(const VAArgExpr *VAA,
+                                 CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+  const Expr *ArgExpr = VAA->getSubExpr();
+  const MemRegion *VAList = getVAListAsRegion(C.getSVal(ArgExpr), ArgExpr, C);
+  if (!VAList)
+    return;
+  VAListState S = getVAListState(C.getState(), VAList);
+  if (S == VAListState::Initialized || S == VAListState::Unknown)
+    return;
+
+  std::string ErrMsg =
+      formatv("va_arg() is called on an {0} va_list", describeState(S));
+  reportUninitializedAccess(VAList, ErrMsg, C);
+}
+
+void VAListChecker::checkDeadSymbols(SymbolReaper &SR,
+                                     CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+  VAListStateMapTy Tracked = State->get<VAListStateMap>();
+  RegionVector Leaked;
+  for (const auto &[Reg, S] : Tracked) {
+    if (SR.isLiveRegion(Reg))
+      continue;
+    if (S == VAListState::Initialized)
+      Leaked.push_back(Reg);
+    State = State->remove<VAListStateMap>(Reg);
+  }
+  if (ExplodedNode *N = C.addTransition(State)) {
+    reportLeaked(Leaked, "Initialized va_list", " is leaked", C, N);
+  }
+}
+
+// This function traverses the exploded graph backwards and finds the node where
+// the va_list becomes initialized. That node is used for uniquing the bug
+// paths. It is not likely that there are several different va_lists that
+// belong to different stack frames, so that case is not yet handled.
+const ExplodedNode *
+VAListChecker::getStartCallSite(const ExplodedNode *N,
+                                const MemRegion *Reg) const {
+  const LocationContext *LeakContext = N->getLocationContext();
+  const ExplodedNode *StartCallNode = N;
+
+  bool SeenInitializedState = false;
+
+  while (N) {
+    VAListState S = getVAListState(N->getState(), Reg);
+    if (S == VAListState::Initialized) {
+      SeenInitializedState = true;
+    } else if (SeenInitializedState) {
+      break;
+    }
+    const LocationContext *NContext = N->getLocationContext();
+    if (NContext == LeakContext || NContext->isParentOf(LeakContext))
+      StartCallNode = N;
+    N = N->pred_empty() ? nullptr : *(N->pred_begin());
+  }
+
+  return StartCallNode;
+}
+
+void VAListChecker::reportUninitializedAccess(const MemRegion *VAList,
+                                              StringRef Msg,
+                                              CheckerContext &C) const {
+  if (ExplodedNode *N = C.generateErrorNode()) {
+    auto R = std::make_unique<PathSensitiveBugReport>(UninitAccessBug, Msg, N);
+    R->markInteresting(VAList);
+    R->addVisitor(std::make_unique<VAListBugVisitor>(VAList));
+    C.emitReport(std::move(R));
+  }
+}
+
+void VAListChecker::reportLeaked(const RegionVector &Leaked, StringRef Msg1,
+                                 StringRef Msg2, CheckerContext &C,
+                                 ExplodedNode *N) const {
+  for (const MemRegion *Reg : Leaked) {
+    const ExplodedNode *StartNode = getStartCallSite(N, Reg);
+    PathDiagnosticLocation LocUsedForUniqueing;
+
+    if (const Stmt *StartCallStmt = StartNode->getStmtForDiagnostics())
+      LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
+          StartCallStmt, C.getSourceManager(), StartNode->getLocationContext());
+
+    SmallString<100> Buf;
+    llvm::raw_svector_ostream OS(Buf);
+    OS << Msg1;
+    std::string VariableName = Reg->getDescriptiveName();
+    if (!VariableName.empty())
+      OS << " " << VariableName;
+    OS << Msg2;
+
+    auto R = std::make_unique<PathSensitiveBugReport>(
+        LeakBug, OS.str(), N, LocUsedForUniqueing,
+        StartNode->getLocationContext()->getDecl());
+    R->markInteresting(Reg);
+    R->addVisitor(std::make_unique<VAListBugVisitor>(Reg, true));
+    C.emitReport(std::move(R));
+  }
+}
+
+void VAListChecker::checkVAListStartCall(const CallEvent &Call,
+                                         CheckerContext &C) const {
+  const MemRegion *Arg =
+      getVAListAsRegion(Call.getArgSVal(0), Call.getArgExpr(0), C);
+  if (!Arg)
+    return;
+
+  ProgramStateRef State = C.getState();
+  VAListState ArgState = getVAListState(State, Arg);
+
+  if (ArgState == VAListState::Initialized) {
+    RegionVector Leaked{Arg};
+    if (ExplodedNode *N = C.addTransition(State))
+      reportLeaked(Leaked, "Initialized va_list", " is initialized again", C,
+                   N);
+    return;
+  }
+
+  State = State->set<VAListStateMap>(Arg, VAListState::Initialized);
+  C.addTransition(State);
+}
+
+void VAListChecker::checkVAListCopyCall(const CallEvent &Call,
+                                        CheckerContext &C) const {
+  const MemRegion *Arg1 =
+      getVAListAsRegion(Call.getArgSVal(0), Call.getArgExpr(0), C);
+  const MemRegion *Arg2 =
+      getVAListAsRegion(Call.getArgSVal(1), Call.getArgExpr(1), C);
+  if (!Arg1 || !Arg2)
+    return;
+
+  ProgramStateRef State = C.getState();
+  if (Arg1 == Arg2) {
+    RegionVector Leaked{Arg1};
+    if (ExplodedNode *N = C.addTransition(State))
+      reportLeaked(Leaked, "va_list", " is copied onto itself", C, N);
+    return;
+  }
+  VAListState State1 = getVAListState(State, Arg1);
+  VAListState State2 = getVAListState(State, Arg2);
+  // Update the ProgramState by copying the state of Arg2 to Arg1.
+  State = State->set<VAListStateMap>(Arg1, State2);
+  if (State1 == VAListState::Initialized) {
+    RegionVector Leaked{Arg1};
+    std::string Msg2 =
+        formatv(" is overwritten by {0} {1} one",
+                (State2 == VAListState::Initialized) ? "another" : "an",
+                describeState(State2));
+    if (ExplodedNode *N = C.addTransition(State))
+      reportLeaked(Leaked, "Initialized va_list", Msg2, C, N);
+    return;
+  }
+  if (State2 != VAListState::Initialized && State2 != VAListState::Unknown) {
+    std::string Msg = formatv("{0} va_list is copied", describeState(State2));
+    Msg[0] = toupper(Msg[0]);
+    reportUninitializedAccess(Arg2, Msg, C);
+    return;
+  }
+  C.addTransition(State);
+}
+
+void VAListChecker::checkVAListEndCall(const CallEvent &Call,
+                                       CheckerContext &C) const {
+  const MemRegion *Arg =
+      getVAListAsRegion(Call.getArgSVal(0), Call.getArgExpr(0), C);
+  if (!Arg)
+    return;
+
+  ProgramStateRef State = C.getState();
+  VAListState ArgState = getVAListState(State, Arg);
+
+  if (ArgState != VAListState::Unknown &&
+      ArgState != VAListState::Initialized) {
+    std::string Msg = formatv("va_end() is called on an {0} va_list",
+                              describeState(ArgState));
+    reportUninitializedAccess(Arg, Msg, C);
+    return;
+  }
+  State = State->set<VAListStateMap>(Arg, VAListState::Released);
+  C.addTransition(State);
+}
+
+PathDiagnosticPieceRef VAListChecker::VAListBugVisitor::VisitNode(
+    const ExplodedNode *N, BugReporterContext &BRC, PathSensitiveBugReport &) {
+  ProgramStateRef State = N->getState();
+  ProgramStateRef StatePrev = N->getFirstPred()->getState();
+
+  const Stmt *S = N->getStmtForDiagnostics();
+  if (!S)
+    return nullptr;
+
+  VAListState After = getVAListState(State, Reg);
+  VAListState Before = getVAListState(StatePrev, Reg);
+  if (Before == After)
+    return nullptr;
+
+  StringRef Msg;
+  switch (After) {
+  case VAListState::Uninitialized:
+    Msg = "Copied uninitialized contents into the va_list";
+    break;
+  case VAListState::Unknown:
+    Msg = "Copied unknown contents into the va_list";
+    break;
+  case VAListState::Initialized:
+    Msg = "Initialized va_list";
+    break;
+  case VAListState::Released:
+    Msg = "Ended va_list";
+    break;
+  }
+
+  if (Msg.empty())
+    return nullptr;
+
+  PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+                             N->getLocationContext());
+  return std::make_shared<PathDiagnosticEventPiece>(Pos, Msg, true);
+}
+
+void ento::registerVAListChecker(CheckerManager &Mgr) {
+  Mgr.registerChecker<VAListChecker>();
+}
+
+bool ento::shouldRegisterVAListChecker(const CheckerManager &) { return true; }
diff --git a/flang/include/flang/Lower/Cuda.h b/flang/include/flang/Lower/Cuda.h
new file mode 100644
index 0000000000000..704b0356c19ed
--- /dev/null
+++ b/flang/include/flang/Lower/Cuda.h
@@ -0,0 +1,74 @@
+//===-- Lower/CUDA.h -- CUDA Fortran utilities ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FORTRAN_LOWER_CUDA_H
+#define FORTRAN_LOWER_CUDA_H
+
+#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Builder/MutableBox.h"
+#include "flang/Optimizer/Dialect/CUF/CUFOps.h"
+#include "flang/Runtime/allocator-registry-consts.h"
+#include "flang/Semantics/tools.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/OpenACC/OpenACC.h"
+
+namespace mlir {
+class Value;
+class Location;
+class MLIRContext;
+} // namespace mlir
+
+namespace hlfir {
+class ElementalOp;
+} // namespace hlfir
+
+namespace Fortran::lower {
+
+class AbstractConverter;
+
+static inline unsigned getAllocatorIdx(const Fortran::semantics::Symbol &sym) {
+  std::optional<Fortran::common::CUDADataAttr> cudaAttr =
+      Fortran::semantics::GetCUDADataAttr(&sym.GetUltimate());
+  if (cudaAttr) {
+    if (*cudaAttr == Fortran::common::CUDADataAttr::Pinned)
+      return kPinnedAllocatorPos;
+    if (*cudaAttr == Fortran::common::CUDADataAttr::Device)
+      return kDeviceAllocatorPos;
+    if (*cudaAttr == Fortran::common::CUDADataAttr::Managed)
+      return kManagedAllocatorPos;
+    if (*cudaAttr == Fortran::common::CUDADataAttr::Unified)
+      return kUnifiedAllocatorPos;
+  }
+  return kDefaultAllocator;
+}
+
+mlir::Type gatherDeviceComponentCoordinatesAndType(
+    fir::FirOpBuilder &builder, mlir::Location loc,
+    const Fortran::semantics::Symbol &sym, fir::RecordType recTy,
+    llvm::SmallVector<mlir::Value> &coordinates);
+
+/// Translate the CUDA Fortran attributes of \p sym into the FIR CUDA attribute
+/// representation.
+cuf::DataAttributeAttr
+translateSymbolCUFDataAttribute(mlir::MLIRContext *mlirContext,
+                                const Fortran::semantics::Symbol &sym);
+
+/// Check if the rhs has an implicit conversion. Return the elemental op if
+/// there is a conversion. Return null otherwise.
+hlfir::ElementalOp isTransferWithConversion(mlir::Value rhs);
+
+/// Check if the value is an allocatable with double descriptor.
+bool hasDoubleDescriptor(mlir::Value);
+
+} // end namespace Fortran::lower
+
+#endif // FORTRAN_LOWER_CUDA_H
diff --git a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
index 6cf676fde752a..eea6c463d6290 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -218,10 +218,22 @@ static int getComplementOpc(int Opc) {
 // Changes form of comparison inclusive <-> exclusive.
 static AArch64CC::CondCode getAdjustedCmp(AArch64CC::CondCode Cmp) {
   switch (Cmp) {
-  case AArch64CC::GT: return AArch64CC::GE;
-  case AArch64CC::GE: return AArch64CC::GT;
-  case AArch64CC::LT: return AArch64CC::LE;
-  case AArch64CC::LE: return AArch64CC::LT;
+  case AArch64CC::GT:
+    return AArch64CC::GE;
+  case AArch64CC::GE:
+    return AArch64CC::GT;
+  case AArch64CC::LT:
+    return AArch64CC::LE;
+  case AArch64CC::LE:
+    return AArch64CC::LT;
+  case AArch64CC::HI:
+    return AArch64CC::HS;
+  case AArch64CC::HS:
+    return AArch64CC::HI;
+  case AArch64CC::LO:
+    return AArch64CC::LS;
+  case AArch64CC::LS:
+    return AArch64CC::LO;
   default:
     llvm_unreachable("Unexpected condition code");
   }
@@ -229,15 +241,20 @@ static AArch64CC::CondCode getAdjustedCmp(AArch64CC::CondCode Cmp) {
 
 // Transforms GT -> GE, GE -> GT, LT -> LE, LE -> LT by updating comparison
 // operator and condition code.
-AArch64ConditionOptimizer::CmpInfo AArch64ConditionOptimizer::adjustCmp(
-    MachineInstr *CmpMI, AArch64CC::CondCode Cmp) {
+AArch64ConditionOptimizer::CmpInfo
+AArch64ConditionOptimizer::adjustCmp(MachineInstr *CmpMI,
+                                     AArch64CC::CondCode Cmp) {
   unsigned Opc = CmpMI->getOpcode();
+  unsigned OldOpc = Opc;
+
+  bool IsSigned = Cmp == AArch64CC::GT || Cmp == AArch64CC::GE ||
+                  Cmp == AArch64CC::LT || Cmp == AArch64CC::LE;
 
   // CMN (compare with negative immediate) is an alias to ADDS (as
   // "operand - negative" == "operand + positive")
   bool Negative = (Opc == AArch64::ADDSWri || Opc == AArch64::ADDSXri);
 
-  int Correction = (Cmp == AArch64CC::GT) ? 1 : -1;
+  int Correction = (Cmp == AArch64CC::GT || Cmp == AArch64CC::HI) ? 1 : -1;
   // Negate Correction value for comparison with negative immediate (CMN).
   if (Negative) {
     Correction = -Correction;
@@ -246,13 +263,23 @@ AArch64ConditionOptimizer::CmpInfo AArch64ConditionOptimizer::adjustCmp(
   const int OldImm = (int)CmpMI->getOperand(2).getImm();
   const int NewImm = std::abs(OldImm + Correction);
 
-  // Handle +0 -> -1 and -0 -> +1 (CMN with 0 immediate) transitions by
-  // adjusting compare instruction opcode.
-  if (OldImm == 0 && ((Negative && Correction == 1) ||
-                      (!Negative && Correction == -1))) {
+  // Handle cmn 1 -> cmp 0 transitions by adjusting compare instruction opcode.
+  if (OldImm == 1 && Negative && Correction == -1) {
+    // If we are adjusting from -1 to 0, we need to change the opcode.
+    Opc = getComplementOpc(Opc);
+  }
+
+  // Handle +0 -> -1 transitions by adjusting compare instruction opcode.
+  assert((OldImm != 0 || !Negative) && "Should not encounter cmn 0!");
+  if (OldImm == 0 && Correction == -1) {
     Opc = getComplementOpc(Opc);
   }
 
+  // If we change opcodes, this means we did an unsigned wrap, so return the old
+  // cmp.
+  if (!IsSigned && Opc != OldOpc)
+    return CmpInfo(OldImm, OldOpc, Cmp);
+
   return CmpInfo(NewImm, Opc, getAdjustedCmp(Cmp));
 }
 
@@ -331,6 +358,14 @@ bool AArch64ConditionOptimizer::isPureCmp(MachineInstr &CmpMI) {
   return true;
 }
 
+static bool isGreaterThan(AArch64CC::CondCode Cmp) {
+  return Cmp == AArch64CC::GT || Cmp == AArch64CC::HI;
+}
+
+static bool isLessThan(AArch64CC::CondCode Cmp) {
+  return Cmp == AArch64CC::LT || Cmp == AArch64CC::LO;
+}
+
 // This function transforms two CMP+CSINC pairs within the same basic block
 // when both conditions are the same (GT/GT or LT/LT) and immediates differ
 // by 1.
@@ -519,6 +554,9 @@ bool AArch64ConditionOptimizer::optimizeCrossBlock(MachineBasicBlock &HBB) {
   const int HeadImm = (int)HeadCmpMI->getOperand(2).getImm();
   const int TrueImm = (int)TrueCmpMI->getOperand(2).getImm();
 
+  int HeadImmTrueValue = HeadImm;
+  int TrueImmTrueValue = TrueImm;
+
   LLVM_DEBUG(dbgs() << "Head branch:\n");
   LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(HeadCmp)
                     << '\n');
@@ -529,9 +567,17 @@ bool AArch64ConditionOptimizer::optimizeCrossBlock(MachineBasicBlock &HBB) {
                     << '\n');
   LLVM_DEBUG(dbgs() << "\timmediate: " << TrueImm << '\n');
 
-  if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::LT) ||
-       (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::GT)) &&
-      std::abs(TrueImm - HeadImm) == 2) {
+  unsigned Opc = HeadCmpMI->getOpcode();
+  if (Opc == AArch64::ADDSWri || Opc == AArch64::ADDSXri)
+    HeadImmTrueValue = -HeadImmTrueValue;
+
+  Opc = TrueCmpMI->getOpcode();
+  if (Opc == AArch64::ADDSWri || Opc == AArch64::ADDSXri)
+    TrueImmTrueValue = -TrueImmTrueValue;
+
+  if (((isGreaterThan(HeadCmp) && isLessThan(TrueCmp)) ||
+       (isLessThan(HeadCmp) && isGreaterThan(TrueCmp))) &&
+      std::abs(TrueImmTrueValue - HeadImmTrueValue) == 2) {
     // This branch transforms machine instructions that correspond to
     //
     // 1) (a > {TrueImm} && ...) || (a < {HeadImm} && ...)
@@ -550,9 +596,9 @@ bool AArch64ConditionOptimizer::optimizeCrossBlock(MachineBasicBlock &HBB) {
       modifyCmp(TrueCmpMI, TrueCmpInfo);
       return true;
     }
-  } else if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::GT) ||
-              (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::LT)) &&
-             std::abs(TrueImm - HeadImm) == 1) {
+  } else if (((isGreaterThan(HeadCmp) && isGreaterThan(TrueCmp)) ||
+              (isLessThan(HeadCmp) && isLessThan(TrueCmp))) &&
+             std::abs(TrueImmTrueValue - HeadImmTrueValue) == 1) {
     // This branch transforms machine instructions that correspond to
     //
     // 1) (a > {TrueImm} && ...) || (a > {HeadImm} && ...)
@@ -565,8 +611,8 @@ bool AArch64ConditionOptimizer::optimizeCrossBlock(MachineBasicBlock &HBB) {
 
     // GT -> GE transformation increases immediate value, so picking the
     // smaller one; LT -> LE decreases immediate value so invert the choice.
-    bool adjustHeadCond = (HeadImm < TrueImm);
-    if (HeadCmp == AArch64CC::LT) {
+    bool adjustHeadCond = (HeadImmTrueValue < TrueImmTrueValue);
+    if (isLessThan(HeadCmp)) {
       adjustHeadCond = !adjustHeadCond;
     }
 
diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
index 80eab2749ce10..2d23b526b031c 100644
--- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -955,11 +955,11 @@ define i32 @combine_ugt_uge_10() #0 {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
-; CHECK-NEXT:    ldr w9, [x8]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #10
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
-; CHECK-NEXT:    cmp w9, #11
-; CHECK-NEXT:    b.lo .LBB12_3
+; CHECK-NEXT:    b.ls .LBB12_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    adrp x9, :got:c
 ; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
@@ -971,7 +971,6 @@ define i32 @combine_ugt_uge_10() #0 {
 ; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB12_3: // %lor.lhs.false
-; CHECK-NEXT:    cmp w9, #10
 ; CHECK-NEXT:    b.lo .LBB12_6
 ; CHECK-NEXT:  .LBB12_4: // %land.lhs.true3
 ; CHECK-NEXT:    adrp x9, :got:d
@@ -1022,8 +1021,8 @@ define i32 @combine_ugt_ult_5() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmp w8, #6
-; CHECK-NEXT:    b.lo .LBB13_3
+; CHECK-NEXT:    cmp w8, #5
+; CHECK-NEXT:    b.ls .LBB13_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    adrp x9, :got:c
@@ -1037,8 +1036,7 @@ define i32 @combine_ugt_ult_5() #0 {
 ; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB13_3: // %lor.lhs.false
-; CHECK-NEXT:    cmp w8, #4
-; CHECK-NEXT:    b.hi .LBB13_6
+; CHECK-NEXT:    b.hs .LBB13_6
 ; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    adrp x9, :got:d
@@ -1089,11 +1087,11 @@ define i32 @combine_ult_uge_5() #0 {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
-; CHECK-NEXT:    ldr w9, [x8]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #5
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
-; CHECK-NEXT:    cmp w9, #4
-; CHECK-NEXT:    b.hi .LBB14_3
+; CHECK-NEXT:    b.hs .LBB14_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    adrp x9, :got:c
 ; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
@@ -1105,7 +1103,6 @@ define i32 @combine_ult_uge_5() #0 {
 ; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB14_3: // %lor.lhs.false
-; CHECK-NEXT:    cmp w9, #5
 ; CHECK-NEXT:    b.hi .LBB14_6
 ; CHECK-NEXT:  .LBB14_4: // %land.lhs.true3
 ; CHECK-NEXT:    adrp x9, :got:d
@@ -1156,8 +1153,8 @@ define i32 @combine_ult_ugt_5() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmp w8, #4
-; CHECK-NEXT:    b.hi .LBB15_3
+; CHECK-NEXT:    cmp w8, #5
+; CHECK-NEXT:    b.hs .LBB15_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    adrp x9, :got:c
@@ -1171,8 +1168,7 @@ define i32 @combine_ult_ugt_5() #0 {
 ; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB15_3: // %lor.lhs.false
-; CHECK-NEXT:    cmp w8, #6
-; CHECK-NEXT:    b.lo .LBB15_6
+; CHECK-NEXT:    b.ls .LBB15_6
 ; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    adrp x9, :got:d
@@ -1224,8 +1220,8 @@ define i32 @combine_ugt_ult_n5() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmn w8, #4
-; CHECK-NEXT:    b.lo .LBB16_3
+; CHECK-NEXT:    cmn w8, #5
+; CHECK-NEXT:    b.ls .LBB16_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    adrp x9, :got:c
@@ -1239,8 +1235,7 @@ define i32 @combine_ugt_ult_n5() #0 {
 ; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB16_3: // %lor.lhs.false
-; CHECK-NEXT:    cmn w8, #6
-; CHECK-NEXT:    b.hi .LBB16_6
+; CHECK-NEXT:    b.hs .LBB16_6
 ; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    adrp x9, :got:d
@@ -1292,8 +1287,8 @@ define i32 @combine_ult_ugt_n5() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmn w8, #6
-; CHECK-NEXT:    b.hi .LBB17_3
+; CHECK-NEXT:    cmn w8, #5
+; CHECK-NEXT:    b.hs .LBB17_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    adrp x9, :got:c
@@ -1307,8 +1302,7 @@ define i32 @combine_ult_ugt_n5() #0 {
 ; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB17_3: // %lor.lhs.false
-; CHECK-NEXT:    cmn w8, #4
-; CHECK-NEXT:    b.lo .LBB17_6
+; CHECK-NEXT:    b.ls .LBB17_6
 ; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    adrp x9, :got:d
@@ -1361,8 +1355,8 @@ define i32 @combine_ult_gt_n5() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmn w8, #6
-; CHECK-NEXT:    b.hi .LBB18_3
+; CHECK-NEXT:    cmn w8, #5
+; CHECK-NEXT:    b.hs .LBB18_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    adrp x9, :got:c
@@ -1376,8 +1370,7 @@ define i32 @combine_ult_gt_n5() #0 {
 ; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB18_3: // %lor.lhs.false
-; CHECK-NEXT:    cmn w8, #4
-; CHECK-NEXT:    b.lt .LBB18_6
+; CHECK-NEXT:    b.le .LBB18_6
 ; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    adrp x9, :got:d
