This revision was landed with ongoing or failed builds.
This revision was automatically updated to reflect the committed changes.
Closed by commit rGf988f680649a: [Analysis] Add support for vscale in computeKnownBitsFromOperator (authored by david-arm).
Herald added a project: clang.
Herald added a subscriber: cfe-commits.

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D109883/new/

https://reviews.llvm.org/D109883

Files:
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntb.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntd.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cnth.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntw.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_len-bfloat.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_len.c
  llvm/lib/Analysis/ValueTracking.cpp
  llvm/test/Transforms/InstCombine/icmp-vscale.ll
  llvm/test/Transforms/InstSimplify/vscale.ll
  llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll

Index: llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
+++ llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
@@ -17,8 +17,8 @@
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[POINTER_PHI:%.*]] = phi i32* [ %c, %vector.ph ], [ %[[PTR_IND:.*]], %vector.body ]
 ; CHECK:         [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP6:%.*]] = shl i64 [[TMP5]], 2
-; CHECK-NEXT:    [[TMP7:%.*]] = shl i64 [[TMP5]], 4
+; CHECK-NEXT:    [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 2
+; CHECK-NEXT:    [[TMP7:%.*]] = shl nuw nsw i64 [[TMP5]], 4
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[VECTOR_GEP:%.*]] = shl <vscale x 4 x i64> [[TMP8]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr i32, i32* [[POINTER_PHI]], <vscale x 4 x i64> [[VECTOR_GEP]]
@@ -80,16 +80,16 @@
 ; CHECK-NEXT:    %[[LPTR1:.*]] = bitcast i32* %[[LGEP1]] to <vscale x 4 x i32>*
 ; CHECK-NEXT:    %{{.*}} = load <vscale x 4 x i32>, <vscale x 4 x i32>* %[[LPTR1]], align 4
 ; CHECK-NEXT:    %[[VSCALE1:.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    %[[TMP1:.*]] = shl i32 %[[VSCALE1]], 2
-; CHECK-NEXT:    %[[TMP2:.*]] = sext i32 %[[TMP1]] to i64
+; CHECK-NEXT:    %[[TMP1:.*]] = shl nuw nsw i32 %[[VSCALE1]], 2
+; CHECK-NEXT:    %[[TMP2:.*]] = zext i32 %[[TMP1]] to i64
 ; CHECK-NEXT:    %[[LGEP2:.*]] = getelementptr i32, i32* %[[LGEP1]], i64 %[[TMP2]]
 ; CHECK-NEXT:    %[[LPTR2:.*]] = bitcast i32* %[[LGEP2]] to <vscale x 4 x i32>*
 ; CHECK-NEXT:    %{{.*}} = load <vscale x 4 x i32>, <vscale x 4 x i32>* %[[LPTR2]], align 4
 ; CHECK:         %[[SPTR1:.*]] = bitcast i32* %[[SGEP1]] to <vscale x 4 x i32>*
 ; CHECK-NEXT:    store <vscale x 4 x i32> %{{.*}}, <vscale x 4 x i32>* %[[SPTR1]], align 4
 ; CHECK-NEXT:    %[[VSCALE2:.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    %[[TMP3:.*]] = shl i32 %[[VSCALE2]], 2
-; CHECK-NEXT:    %[[TMP4:.*]] = sext i32 %[[TMP3]] to i64
+; CHECK-NEXT:    %[[TMP3:.*]] = shl nuw nsw i32 %[[VSCALE2]], 2
+; CHECK-NEXT:    %[[TMP4:.*]] = zext i32 %[[TMP3]] to i64
 ; CHECK-NEXT:    %[[SGEP2:.*]] = getelementptr i32, i32* %[[SGEP1]], i64 %[[TMP4]]
 ; CHECK-NEXT:    %[[SPTR2:.*]] = bitcast i32* %[[SGEP2]] to <vscale x 4 x i32>*
 ; CHECK-NEXT:    store <vscale x 4 x i32> %{{.*}}, <vscale x 4 x i32>* %[[SPTR2]], align 4
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:  %[[APTRS1:.*]] = getelementptr i32, i32* %a, <vscale x 2 x i64> %[[VECIND1]]
 ; CHECK-NEXT:  %[[GEPA1:.*]] = getelementptr i32, i32* %a, i64 %[[IDX]]
 ; CHECK-NEXT:  %[[VSCALE64:.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:  %[[VSCALE64X2:.*]] = shl i64 %[[VSCALE64]], 1
+; CHECK-NEXT:  %[[VSCALE64X2:.*]] = shl nuw nsw i64 %[[VSCALE64]], 1
 ; CHECK-NEXT:  %[[TMP3:.*]] = insertelement <vscale x 2 x i64> poison, i64 %[[VSCALE64X2]], i32 0
 ; CHECK-NEXT:  %[[TMP4:.*]] = shufflevector <vscale x 2 x i64> %[[TMP3]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
 ; CHECK-NEXT:  %[[TMP5:.*]] = add <vscale x 2 x i64> %[[TMP4]], %[[STEPVEC]]
@@ -147,8 +147,8 @@
 ; CHECK:       %[[BPTR1:.*]] = bitcast i32** %[[GEPB1]] to <vscale x 2 x i32*>*
 ; CHECK-NEXT:  store <vscale x 2 x i32*> %[[APTRS1]], <vscale x 2 x i32*>* %[[BPTR1]], align 8
 ; CHECK:       %[[VSCALE32:.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:  %[[VSCALE32X2:.*]] = shl i32 %[[VSCALE32]], 1
-; CHECK-NEXT:  %[[TMP6:.*]] = sext i32 %[[VSCALE32X2]] to i64
+; CHECK-NEXT:  %[[VSCALE32X2:.*]] = shl nuw nsw i32 %[[VSCALE32]], 1
+; CHECK-NEXT:  %[[TMP6:.*]] = zext i32 %[[VSCALE32X2]] to i64
 ; CHECK-NEXT:  %[[GEPB2:.*]] = getelementptr i32*, i32** %[[GEPB1]], i64 %[[TMP6]]
 ; CHECK-NEXT:  %[[BPTR2:.*]] = bitcast i32** %[[GEPB2]] to <vscale x 2 x i32*>*
 ; CHECK-NEXT:  store <vscale x 2 x i32*> %[[APTRS2]], <vscale x 2 x i32*>* %[[BPTR2]], align 8
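
A knock-on effect visible in the checks above: once the shift carries the
nuw/nsw flags, the sign bit of the shifted i32 value is provably clear, so
the old sext to i64 is canonicalized to a zext (the two agree whenever the
sign bit is zero). A minimal standalone C++ illustration of that equivalence
(just the underlying arithmetic, not LLVM code):

  #include <cassert>
  #include <cstdint>

  int main() {
    // shl nuw nsw guarantees the i32 result's sign bit is clear, so sign-
    // and zero-extension to i64 agree; InstCombine therefore prefers zext.
    int32_t X = 7 << 2;          // stands in for vscale << 2
    int64_t SExt = X;            // sext i32 -> i64
    uint64_t ZExt = (uint32_t)X; // zext i32 -> i64
    assert(SExt == (int64_t)ZExt);
    return 0;
  }
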
Index: llvm/test/Transforms/InstSimplify/vscale.ll
===================================================================
--- llvm/test/Transforms/InstSimplify/vscale.ll
+++ llvm/test/Transforms/InstSimplify/vscale.ll
@@ -128,6 +128,18 @@
   ret i32 %r
 }
 
+; Known values of vscale intrinsic
+
+define i64 @vscale64_range4_4() vscale_range(4,4) {
+; CHECK-LABEL: @vscale64_range4_4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i64 4
+;
+entry:
+  %vscale = call i64 @llvm.vscale.i64()
+  ret i64 %vscale
+}
+
 ; more complicated expressions
 
 define <vscale x 2 x i1> @cmp_le_smax_always_true(<vscale x 2 x i64> %x) {
@@ -217,3 +229,5 @@
   %r = extractelement <vscale x 4 x i32> %splat, i32 %idx
   ret i32 %r
 }
+
+declare i64 @llvm.vscale.i64()
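
The vscale_range(4,4) fold above works because when the attribute's minimum
and maximum coincide, every bit of vscale becomes known: the known-one bits
hold the value and the known-zero bits hold its complement. A standalone C++
sketch of that bit accounting (illustrative only, not the LLVM KnownBits API):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t Min = 4, Max = 4;     // vscale_range(4,4)
    assert(Min == Max);
    uint64_t One = Min;            // bits known to be one
    uint64_t Zero = ~Min;          // bits known to be zero
    assert((One | Zero) == ~0ull); // every bit is known...
    assert(One == 4);              // ...so the value must be exactly 4
    return 0;
  }
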
Index: llvm/test/Transforms/InstCombine/icmp-vscale.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/InstCombine/icmp-vscale.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+define i1 @ugt_vscale64_x_32() vscale_range(0,16) {
+; CHECK-LABEL: @ugt_vscale64_x_32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %vscale = call i64 @llvm.vscale.i64()
+  %num_els = shl i64 %vscale, 5
+  %res = icmp ugt i64 %num_els, 1024
+  ret i1 %res
+}
+
+define i1 @ugt_vscale64_x_31() vscale_range(0,16) {
+; CHECK-LABEL: @ugt_vscale64_x_31(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %vscale = call i64 @llvm.vscale.i64()
+  %num_els = mul i64 %vscale, 31
+  %res = icmp ugt i64 %num_els, 1024
+  ret i1 %res
+}
+
+define i1 @ugt_vscale16_x_32() vscale_range(0,16) {
+; CHECK-LABEL: @ugt_vscale16_x_32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %vscale = call i16 @llvm.vscale.i16()
+  %num_els = shl i16 %vscale, 5
+  %res = icmp ugt i16 %num_els, 1024
+  ret i1 %res
+}
+
+define i1 @ult_vscale16() vscale_range(0,16) {
+; CHECK-LABEL: @ult_vscale16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %vscale = call i16 @llvm.vscale.i16()
+  %res = icmp ult i16 1024, %vscale
+  ret i1 %res
+}
+
+define i1 @ule_vscale64() vscale_range(0,16) {
+; CHECK-LABEL: @ule_vscale64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %vscale = call i64 @llvm.vscale.i64()
+  %res = icmp ule i64 1024, %vscale
+  ret i1 %res
+}
+
+define i1 @eq_vscale64_range4_4() vscale_range(4,4) {
+; CHECK-LABEL: @eq_vscale64_range4_4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 true
+;
+entry:
+  %vscale = call i64 @llvm.vscale.i64()
+  %res = icmp eq i64 %vscale, 4
+  ret i1 %res
+}
+
+define i1 @ne_vscale64_x_32() vscale_range(0,16) {
+; CHECK-LABEL: @ne_vscale64_x_32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 true
+;
+entry:
+  %vscale = call i64 @llvm.vscale.i64()
+  %num_els = mul i64 %vscale, 32
+  %res = icmp ne i64 %num_els, 39488
+  ret i1 %res
+}
+
+declare i8 @llvm.vscale.i8()
+declare i16 @llvm.vscale.i16()
+declare i32 @llvm.vscale.i32()
+declare i64 @llvm.vscale.i64()
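
The folds above follow from a simple bound: with vscale_range(0,16), the new
known-bits logic clears bits 5 and upwards, bounding vscale by 31 (known bits
can only express power-of-two bounds, so the tighter limit of 16 is lost).
Hence vscale << 5 is at most 992 and vscale * 31 at most 961, both below
1024, and vscale * 32 is at most 992 and so can never equal 39488. A quick
standalone check of that arithmetic (illustrative only):

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Bits [5,63] of vscale are known zero under vscale_range(0,16).
    const uint64_t VScaleMax = 31;
    printf("shl 5:  max %llu\n", (unsigned long long)(VScaleMax << 5)); // 992
    printf("mul 31: max %llu\n", (unsigned long long)(VScaleMax * 31)); // 961
    return 0;
  }
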
Index: llvm/lib/Analysis/ValueTracking.cpp
===================================================================
--- llvm/lib/Analysis/ValueTracking.cpp
+++ llvm/lib/Analysis/ValueTracking.cpp
@@ -1689,6 +1689,32 @@
         if (BitWidth >= 32)
           Known.Zero.setBitsFrom(31);
         break;
+      case Intrinsic::vscale: {
+        if (!II->getFunction()->hasFnAttribute(Attribute::VScaleRange))
+          break;
+
+        auto VScaleRange = II->getFunction()
+                               ->getFnAttribute(Attribute::VScaleRange)
+                               .getVScaleRangeArgs();
+
+        if (VScaleRange.second == 0)
+          break;
+
+        // If vscale min = max then we know the exact value at compile time
+        // and hence we know the exact bits.
+        if (VScaleRange.first == VScaleRange.second) {
+          Known.One = VScaleRange.first;
+          Known.Zero = VScaleRange.first;
+          Known.Zero.flipAllBits();
+          break;
+        }
+
+        unsigned FirstZeroHighBit = 32 - countLeadingZeros(VScaleRange.second);
+        if (FirstZeroHighBit < BitWidth)
+          Known.Zero.setBitsFrom(FirstZeroHighBit);
+
+        break;
+      }
       }
     }
     break;
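
The heart of the ValueTracking change is the FirstZeroHighBit computation:
32 minus the count of leading zeros of the 32-bit maximum gives the lowest
bit position that must be zero in every legal vscale value, and setBitsFrom
then marks that bit and everything above it as known zero. A standalone
sketch of the computation for vscale_range(0,16) (illustrative only; the
patch itself uses LLVM's countLeadingZeros):

  #include <cassert>
  #include <cstdint>

  // Portable stand-in for llvm::countLeadingZeros on a 32-bit value.
  static unsigned clz32(uint32_t X) {
    unsigned N = 0;
    for (uint32_t Bit = 1u << 31; Bit != 0 && (X & Bit) == 0; Bit >>= 1)
      ++N;
    return N;
  }

  int main() {
    uint32_t Max = 16;                            // vscale_range(0,16)
    unsigned FirstZeroHighBit = 32 - clz32(Max);
    assert(FirstZeroHighBit == 5);                // bits [5,63] known zero
    assert((1ull << FirstZeroHighBit) - 1 == 31); // hence vscale <= 31
    return 0;
  }
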
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_len.c
===================================================================
--- clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_len.c
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_len.c
@@ -17,13 +17,13 @@
 // CHECK-LABEL: @test_svlen_s8(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z13test_svlen_s8u10__SVInt8_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_s8(svint8_t op)
@@ -34,13 +34,13 @@
 // CHECK-LABEL: @test_svlen_s16(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svlen_s16u11__SVInt16_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_s16(svint16_t op)
@@ -51,13 +51,13 @@
 // CHECK-LABEL: @test_svlen_s32(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svlen_s32u11__SVInt32_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_s32(svint32_t op)
@@ -68,13 +68,13 @@
 // CHECK-LABEL: @test_svlen_s64(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 1
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svlen_s64u11__SVInt64_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 1
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_s64(svint64_t op)
@@ -85,13 +85,13 @@
 // CHECK-LABEL: @test_svlen_u8(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z13test_svlen_u8u11__SVUint8_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_u8(svuint8_t op)
@@ -102,13 +102,13 @@
 // CHECK-LABEL: @test_svlen_u16(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svlen_u16u12__SVUint16_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_u16(svuint16_t op)
@@ -119,13 +119,13 @@
 // CHECK-LABEL: @test_svlen_u32(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svlen_u32u12__SVUint32_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_u32(svuint32_t op)
@@ -136,13 +136,13 @@
 // CHECK-LABEL: @test_svlen_u64(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 1
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svlen_u64u12__SVUint64_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 1
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_u64(svuint64_t op)
@@ -153,13 +153,13 @@
 // CHECK-LABEL: @test_svlen_f16(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svlen_f16u13__SVFloat16_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_f16(svfloat16_t op)
@@ -170,13 +170,13 @@
 // CHECK-LABEL: @test_svlen_f32(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svlen_f32u13__SVFloat32_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_f32(svfloat32_t op)
@@ -187,13 +187,13 @@
 // CHECK-LABEL: @test_svlen_f64(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 1
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z14test_svlen_f64u13__SVFloat64_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 1
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_f64(svfloat64_t op)
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_len-bfloat.c
===================================================================
--- clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_len-bfloat.c
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_len-bfloat.c
@@ -17,13 +17,13 @@
 // CHECK-LABEL: @test_svlen_bf16(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z15test_svlen_bf16u14__SVBFloat16_t(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svlen_bf16(svbfloat16_t op)
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntw.c
===================================================================
--- clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntw.c
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntw.c
@@ -8,13 +8,13 @@
 // CHECK-LABEL: @test_svcntw(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z11test_svcntwv(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svcntw()
@@ -257,13 +257,13 @@
 // CHECK-LABEL: @test_svcntw_pat_16(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z18test_svcntw_pat_16v(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svcntw_pat_16()
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cnth.c
===================================================================
--- clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cnth.c
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cnth.c
@@ -8,13 +8,13 @@
 // CHECK-LABEL: @test_svcnth(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z11test_svcnthv(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svcnth()
@@ -249,13 +249,13 @@
 // CHECK-LABEL: @test_svcnth_pat_16(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z18test_svcnth_pat_16v(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svcnth_pat_16()
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntd.c
===================================================================
--- clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntd.c
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntd.c
@@ -8,13 +8,13 @@
 // CHECK-LABEL: @test_svcntd(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 1
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z11test_svcntdv(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 1
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svcntd()
@@ -261,13 +261,13 @@
 // CHECK-LABEL: @test_svcntd_pat_16(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 1
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z18test_svcntd_pat_16v(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 1
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svcntd_pat_16()
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntb.c
===================================================================
--- clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntb.c
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cntb.c
@@ -8,13 +8,13 @@
 // CHECK-LABEL: @test_svcntb(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z11test_svcntbv(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svcntb()
@@ -247,13 +247,13 @@
 // CHECK-LABEL: @test_svcntb_pat_16(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
+// CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
 // CHECK-NEXT:    ret i64 [[TMP1]]
 //
 // CPP-CHECK-LABEL: @_Z18test_svcntb_pat_16v(
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
 // CPP-CHECK-NEXT:    ret i64 [[TMP1]]
 //
 uint64_t test_svcntb_pat_16()