llvmbot wrote:

<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-clangir

Author: Andrzej Warzyński (banach-space)

<details>
<summary>Changes</summary>

This patch performs small cleanups and fixes in the AArch64 builtins
lowering code, with the goal of aligning the CIR path more closely
with the existing Clang CodeGen implementation.

Changes include:
* Update tests to account for recent `noundef` support in CIR.
* Rename `AArch64BuiltinInfo` to `armVectorIntrinsicInfo` for better
  consistency with the original CodeGen implementation.
* Simplify `emitAArch64CompareBuiltinExpr`, fix an incorrect
  assert condition (a missing `!`), and use the caller-supplied `kind`
  comparison instead of hard-coding `cir::CmpOpKind::eq`.
* Improve and clarify comments.

Largely NFC; the only intended behavior changes are the assert-condition
and comparison-kind fixes in `emitAArch64CompareBuiltinExpr` noted above.


---

Patch is 43.04 KiB, truncated to 20.00 KiB below, full version: 
https://github.com/llvm/llvm-project/pull/184401.diff


5 Files Affected:

- (modified) clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp (+30-22) 
- (modified) clang/lib/CodeGen/TargetBuiltins/ARM.cpp (+11-1) 
- (modified) clang/test/CIR/CodeGenBuiltins/AArch64/acle_sve_dup.c (+42-42) 
- (modified) clang/test/CodeGen/AArch64/neon/fullfp16.c (+4-8) 
- (modified) clang/test/CodeGen/AArch64/neon/intrinsics.c (+4-5) 


``````````diff
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index df85ba7186775..b62960f4543df 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -40,10 +40,17 @@ static mlir::Value genVscaleTimesFactor(mlir::Location loc,
                                builder.getUInt64(scalingFactor, loc));
 }
 
+//===----------------------------------------------------------------------===//
+//  Intrinsics maps
+//
+//  Maps that help automate code-generation.
+//
+// TODO(cir): Share this code with ARM.cpp
+//===----------------------------------------------------------------------===//
 static bool aarch64SVEIntrinsicsProvenSorted = false;
 
 namespace {
-struct AArch64BuiltinInfo {
+struct armVectorIntrinsicInfo {
   unsigned builtinID;
   unsigned llvmIntrinsic;
   uint64_t typeModifier;
@@ -51,7 +58,7 @@ struct AArch64BuiltinInfo {
   bool operator<(unsigned rhsBuiltinID) const {
     return builtinID < rhsBuiltinID;
   }
-  bool operator<(const AArch64BuiltinInfo &te) const {
+  bool operator<(const armVectorIntrinsicInfo &te) const {
     return builtinID < te.builtinID;
   }
 };
@@ -62,14 +69,16 @@ struct AArch64BuiltinInfo {
 
 #define SVEMAP2(NameBase, TypeModifier)                                        
\
   {SVE::BI__builtin_sve_##NameBase, 0, TypeModifier}
-static const AArch64BuiltinInfo aarch64SVEIntrinsicMap[] = {
+static const armVectorIntrinsicInfo aarch64SVEIntrinsicMap[] = {
 #define GET_SVE_LLVM_INTRINSIC_MAP
 #include "clang/Basic/arm_sve_builtin_cg.inc"
 #undef GET_SVE_LLVM_INTRINSIC_MAP
 };
 
-static const AArch64BuiltinInfo *
-findARMVectorIntrinsicInMap(ArrayRef<AArch64BuiltinInfo> intrinsicMap,
+// Check if Builtin `builtinId` is present in `intrinsicMap`. If yes, returns
+// the corresponding info struct.
+static const armVectorIntrinsicInfo *
+findARMVectorIntrinsicInMap(ArrayRef<armVectorIntrinsicInfo> intrinsicMap,
                             unsigned builtinID, bool &mapProvenSorted) {
 
 #ifndef NDEBUG
@@ -79,7 +88,8 @@ findARMVectorIntrinsicInMap(ArrayRef<AArch64BuiltinInfo> 
intrinsicMap,
   }
 #endif
 
-  const AArch64BuiltinInfo *info = llvm::lower_bound(intrinsicMap, builtinID);
+  const armVectorIntrinsicInfo *info =
+      llvm::lower_bound(intrinsicMap, builtinID);
 
   if (info != intrinsicMap.end() && info->builtinID == builtinID)
     return info;
@@ -97,29 +107,27 @@ emitAArch64CompareBuiltinExpr(CIRGenFunction &cgf, 
CIRGenBuilderTy &builder,
 
   bool scalarCmp = !isa<cir::VectorType>(src.getType());
   if (!scalarCmp) {
-    assert(cast<cir::VectorType>(retTy).getIsScalable() &&
+    assert(!cast<cir::VectorType>(retTy).getIsScalable() &&
            "This is only intended for fixed-width vectors");
-    // Vector retTypes are cast to i8 vectors. Recover original retType.
+    // Vector types are cast to i8 vectors. Recover original type.
     cgf.cgm.errorNYI(loc, std::string("unimplemented vector compare"));
   }
 
   mlir::Value zero = builder.getNullValue(src.getType(), loc);
-  mlir::Value cmp;
   if (cir::isFPOrVectorOfFPType(src.getType())) {
     cgf.cgm.errorNYI(loc, std::string("unimplemented FP compare"));
-  } else {
-    if (scalarCmp)
-      // For scalars, cast !cir.bool to !cir.int<s, 1> so that the compare
-      // result is sign- rather zero-extended when casting to the output
-      // retType.
-      cmp = builder.createCast(
-          loc, cir::CastKind::bool_to_int,
-          builder.createCompare(loc, cir::CmpOpKind::eq, src, zero),
-          builder.getSIntNTy(1));
-    else
-      cgf.cgm.errorNYI(loc, std::string("unimplemented vector compare"));
   }
 
+  if (!scalarCmp)
+    cgf.cgm.errorNYI(loc, std::string("unimplemented vector compare"));
+
+  // For scalars, cast !cir.bool to !cir.int<s, 1> so that the compare
+  // result is sign- rather zero-extended when casting to the output
+  // retType.
+  mlir::Value cmp = builder.createCast(
+      loc, cir::CastKind::bool_to_int,
+      builder.createCompare(loc, kind, src, zero), builder.getSIntNTy(1));
+
   return builder.createCast(loc, cir::CastKind::integral, cmp, retTy);
 }
 
@@ -243,7 +251,7 @@ static unsigned 
getSVEMinEltCount(clang::SVETypeFlags::EltType sveType) {
   }
 }
 
-// TODO: Share with OGCG
+// TODO(cir): Share with OGCG
 constexpr unsigned sveBitsPerBlock = 128;
 
 static cir::VectorType getSVEVectorForElementType(CIRGenModule &cgm,
@@ -261,7 +269,7 @@ static cir::VectorType 
getSVEVectorForElementType(CIRGenModule &cgm,
 /// for Sema checking (see `CheckNeonBuiltinFunctionCall`) and this function
 /// should be kept consistent with the logic in Sema.
 /// TODO: Make this return false for SISD builtins.
-/// TODO: Share this with ARM.cpp
+/// TODO(cir): Share this with ARM.cpp
 static bool hasExtraNeonArgument(unsigned builtinID) {
   // Required by the headers included below, but not in this particular
   // function.
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp 
b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 62920044405be..aa95e92b9f2e9 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -534,6 +534,11 @@ Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, 
Value *Shift,
   return Builder.CreateAShr(Vec, Shift, name);
 }
 
+//===----------------------------------------------------------------------===//
+//  Intrinsics maps
+//
+//  Maps that help automate code-generation.
+//===----------------------------------------------------------------------===//
 enum {
   AddRetType = (1 << 0),
   Add1ArgType = (1 << 1),
@@ -1654,6 +1659,8 @@ static bool AArch64SISDIntrinsicsProvenSorted = false;
 static bool AArch64SVEIntrinsicsProvenSorted = false;
 static bool AArch64SMEIntrinsicsProvenSorted = false;
 
+// Check if Builtin `BuiltinId` is present in `IntrinsicMap`. If yes, returns
+// the corresponding info struct.
 static const ARMVectorIntrinsicInfo *
 findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
                             unsigned BuiltinID, bool &MapProvenSorted) {
@@ -1783,7 +1790,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
     const char *NameHint, unsigned Modifier, const CallExpr *E,
     SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
     llvm::Triple::ArchType Arch) {
-  // Get the last argument, which specifies the vector type.
+
+  // Extract the trailing immediate argument that encodes the type 
discriminator
+  // for this overloaded intrinsic.
+  // TODO: Move to the parent code that takes care of argument processing.
   const Expr *Arg = E->getArg(E->getNumArgs() - 1);
   std::optional<llvm::APSInt> NeonTypeConst =
       Arg->getIntegerConstantExpr(getContext());
diff --git a/clang/test/CIR/CodeGenBuiltins/AArch64/acle_sve_dup.c 
b/clang/test/CIR/CodeGenBuiltins/AArch64/acle_sve_dup.c
index 645305e142585..5fb4a3ab7483f 100644
--- a/clang/test/CIR/CodeGenBuiltins/AArch64/acle_sve_dup.c
+++ b/clang/test/CIR/CodeGenBuiltins/AArch64/acle_sve_dup.c
@@ -35,7 +35,7 @@ svint8_t test_svdup_n_s8(int8_t op) MODE_ATTR
 {
 // CIR:           cir.call_llvm_intrinsic "aarch64.sve.dup.x" %{{.*}} : (!s8i) 
-> !cir.vector<[16] x !s8i>
 
-// LLVM_OGCG_CIR-SAME: i8{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: i8 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.dup.x.nxv16i8(i8 [[OP]])
 // LLVM_OGCG_CIR:    ret <vscale x 16 x i8> [[RES]]
   return SVE_ACLE_FUNC(svdup,_n,_s8,)(op);
@@ -46,7 +46,7 @@ svint16_t test_svdup_n_s16(int16_t op) MODE_ATTR
 {
 // CIR:           cir.call_llvm_intrinsic "aarch64.sve.dup.x" %{{.*}} : 
(!s16i) -> !cir.vector<[8] x !s16i>
 
-// LLVM_OGCG_CIR-SAME: i16{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: i16 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.dup.x.nxv8i16(i16 [[OP]])
 // LLVM_OGCG_CIR:    ret <vscale x 8 x i16> [[RES]]
   return SVE_ACLE_FUNC(svdup,_n,_s16,)(op);
@@ -57,7 +57,7 @@ svint32_t test_svdup_n_s32(int32_t op) MODE_ATTR
 {
 // CIR:           cir.call_llvm_intrinsic "aarch64.sve.dup.x" %{{.*}} : 
(!s32i) -> !cir.vector<[4] x !s32i>
 
-// LLVM_OGCG_CIR-SAME: i32{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: i32 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.dup.x.nxv4i32(i32 [[OP]])
 // LLVM_OGCG_CIR:    ret <vscale x 4 x i32> [[RES]]
   return SVE_ACLE_FUNC(svdup,_n,_s32,)(op);
@@ -68,7 +68,7 @@ svint64_t test_svdup_n_s64(int64_t op) MODE_ATTR
 {
 // CIR:           cir.call_llvm_intrinsic "aarch64.sve.dup.x" %{{.*}} : 
(!s64i) -> !cir.vector<[2] x !s64i>
 
-// LLVM_OGCG_CIR-SAME: i64{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: i64 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.dup.x.nxv2i64(i64 [[OP]])
 // LLVM_OGCG_CIR:    ret <vscale x 2 x i64> [[RES]]
   return SVE_ACLE_FUNC(svdup,_n,_s64,)(op);
@@ -79,7 +79,7 @@ svuint8_t test_svdup_n_u8(uint8_t op) MODE_ATTR
 {
 // CIR:           cir.call_llvm_intrinsic "aarch64.sve.dup.x" %{{.*}} : (!u8i) 
-> !cir.vector<[16] x !u8i>
 
-// LLVM_OGCG_CIR-SAME: i8{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: i8 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.dup.x.nxv16i8(i8 [[OP]])
 // LLVM_OGCG_CIR:    ret <vscale x 16 x i8> [[RES]]
   return SVE_ACLE_FUNC(svdup,_n,_u8,)(op);
@@ -100,7 +100,7 @@ svuint32_t test_svdup_n_u32(uint32_t op) MODE_ATTR
 {
 // CIR:           cir.call_llvm_intrinsic "aarch64.sve.dup.x" %{{.*}} : 
(!u32i) -> !cir.vector<[4] x !u32i>
 
-// LLVM_OGCG_CIR-SAME: i32{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: i32 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.dup.x.nxv4i32(i32 [[OP]])
 // LLVM_OGCG_CIR:    ret <vscale x 4 x i32> [[RES]]
   return SVE_ACLE_FUNC(svdup,_n,_u32,)(op);
@@ -111,7 +111,7 @@ svuint64_t test_svdup_n_u64(uint64_t op) MODE_ATTR
 {
 // CIR:           cir.call_llvm_intrinsic "aarch64.sve.dup.x" %{{.*}} : 
(!u64i) -> !cir.vector<[2] x !u64i>
 
-// LLVM_OGCG_CIR-SAME: i64{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: i64 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.dup.x.nxv2i64(i64 [[OP]])
 // LLVM_OGCG_CIR:    ret <vscale x 2 x i64> [[RES]]
   return SVE_ACLE_FUNC(svdup,_n,_u64,)(op);
@@ -161,7 +161,7 @@ svint8_t test_svdup_n_s8_z(svbool_t pg, int8_t op) MODE_ATTR
 // CIR:           %[[CALL_DUP:.*]] = cir.call_llvm_intrinsic "aarch64.sve.dup" 
%[[CONST_0]], %{{.*}}, %{{.*}} :
 // CIR-SAME:        -> !cir.vector<[16] x !s8i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i8{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i8 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 
x i1> [[PG]], i8 [[OP]])
 // LLVM_OGCG_CIR:     ret {{.*}} [[RES]]
   return SVE_ACLE_FUNC(svdup,_n,_s8_z,)(pg, op);
@@ -176,7 +176,7 @@ svint16_t test_svdup_n_s16_z(svbool_t pg, int16_t op) 
MODE_ATTR
 // CIR:           %[[CALL_DUP:.*]] = cir.call_llvm_intrinsic "aarch64.sve.dup" 
%[[CONST_0]], %[[CONVERT_PG]], %{{.*}} :
 // CIR-SAME:          -> !cir.vector<[8] x !s16i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i16{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i16 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[PG_CONVERTED:%.*]] = call <vscale x 8 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x 
i1> [[PG_CONVERTED]], i16 [[OP]])
 // LLVM_OGCG_CIR:    ret {{.*}} [[RES]]
@@ -192,7 +192,7 @@ svint32_t test_svdup_n_s32_z(svbool_t pg, int32_t op) 
MODE_ATTR
 // CIR:           %[[CALL_DUP:.*]] = cir.call_llvm_intrinsic "aarch64.sve.dup" 
%[[CONST_0]], %[[CONVERT_PG]], %{{.*}} :
 // CIR-SAME:        -> !cir.vector<[4] x !s32i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i32{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i32 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[PG_CONVERTED:%.*]] = call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x 
i1> [[PG_CONVERTED]], i32 [[OP]])
 // LLVM_OGCG_CIR:    ret {{.*}} [[RES]]
@@ -208,7 +208,7 @@ svint64_t test_svdup_n_s64_z(svbool_t pg, int64_t op) 
MODE_ATTR
 // CIR:           %[[CALL_DUP:.*]] = cir.call_llvm_intrinsic "aarch64.sve.dup" 
%[[CONST_0]], %[[CONVERT_PG]], %{{.*}} :
 // CIR-SAME:        -> !cir.vector<[2] x !s64i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i64{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i64 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[PG_CONVERTED:%.*]] = call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x 
i1> [[PG_CONVERTED]], i64 [[OP]])
 // LLVM_OGCG_CIR:    ret {{.*}} [[RES]]
@@ -222,7 +222,7 @@ svuint8_t test_svdup_n_u8_z(svbool_t pg, uint8_t op) 
MODE_ATTR
 // CIR:           %[[CONVERT_PG:.*]] = cir.call_llvm_intrinsic 
"aarch64.sve.dup" %[[CONST_0]], %{{.*}}, %{{.*}} :
 // CIR-SAME:        -> !cir.vector<[16] x !u8i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i8{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i8 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 
x i1> [[PG]], i8 [[OP]])
 // LLVM_OGCG_CIR:    ret {{.*}} [[RES]]
   return SVE_ACLE_FUNC(svdup,_n,_u8_z,)(pg, op);
@@ -237,7 +237,7 @@ svuint16_t test_svdup_n_u16_z(svbool_t pg, uint16_t op) 
MODE_ATTR
 // CIR:           %[[CALL_DUP:.*]] = cir.call_llvm_intrinsic "aarch64.sve.dup" 
%[[CONST_0]], %[[CONVERT_PG]], %{{.*}} :
 // CIR-SAME:          -> !cir.vector<[8] x !u16i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i16{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i16 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[PG_CONVERTED:%.*]] = call <vscale x 8 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x 
i1> [[PG_CONVERTED]], i16 [[OP]])
 // LLVM_OGCG_CIR:    ret {{.*}} [[RES]]
@@ -253,7 +253,7 @@ svuint32_t test_svdup_n_u32_z(svbool_t pg, uint32_t op) 
MODE_ATTR
 // CIR:           %[[CALL_DUP:.*]] = cir.call_llvm_intrinsic "aarch64.sve.dup" 
%[[CONST_0]], %[[CONVERT_PG]], %{{.*}} :
 // CIR-SAME:        -> !cir.vector<[4] x !u32i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i32{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i32 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[PG_CONVERTED:%.*]] = call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x 
i1> [[PG_CONVERTED]], i32 [[OP]])
 // LLVM_OGCG_CIR:    ret {{.*}} [[RES]]
@@ -269,7 +269,7 @@ svuint64_t test_svdup_n_u64_z(svbool_t pg, uint64_t op) 
MODE_ATTR
 // CIR:           %[[CALL_DUP:.*]] = cir.call_llvm_intrinsic "aarch64.sve.dup" 
%[[CONST_0]], %[[CONVERT_PG]], %{{.*}} :
 // CIR-SAME:        -> !cir.vector<[2] x !u64i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i64{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 16 x i1> [[PG:%.*]], i64 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[PG_CONVERTED:%.*]] = call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG]])
 // LLVM_OGCG_CIR:    [[RES:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x 
i1> [[PG_CONVERTED]], i64 [[OP]])
 // LLVM_OGCG_CIR:    ret {{.*}} [[RES]]
@@ -333,7 +333,7 @@ svint8_t test_svdup_n_s8_m(svint8_t inactive, svbool_t pg, 
int8_t op) MODE_ATTR
 // CIR:           cir.call_llvm_intrinsic "aarch64.sve.dup" %{{.*}}, %{{.*}}, 
%{{.*}} :
 // CIR-SAME:        (!cir.vector<[16] x !s8i>, !cir.vector<[16] x !cir.int<u, 
1>>, !s8i) -> !cir.vector<[16] x !s8i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> 
[[PG:%.*]], i8{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> 
[[PG:%.*]], i8 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[TMP0:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> [[INACTIVE]], <vscale x 16 x 
i1> [[PG]], i8 [[OP]])
 // LLVM_OGCG_CIR-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
   return SVE_ACLE_FUNC(svdup,_n,_s8_m,)(inactive, pg, op);
@@ -347,7 +347,7 @@ svint16_t test_svdup_n_s16_m(svint16_t inactive, svbool_t 
pg, int16_t op) MODE_A
 // CIR:       cir.call_llvm_intrinsic "aarch64.sve.dup" %{{.*}}, 
%[[CONVERT_PG]], %{{.*}} :
 // CIR-SAME:    (!cir.vector<[8] x !s16i>, !cir.vector<[8] x !cir.int<u, 1>>, 
!s16i) -> !cir.vector<[8] x !s16i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 16 x i1> 
[[PG:%.*]], i16{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 16 x i1> 
[[PG:%.*]], i16 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[PG_CONVERTED:%.*]] = call <vscale x 8 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG]])
 // LLVM_OGCG_CIR-NEXT:    [[RES:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> [[INACTIVE]], <vscale x 8 x 
i1> [[PG_CONVERTED]], i16 [[OP]])
 // LLVM_OGCG_CIR-NEXT:    ret <vscale x 8 x i16> [[RES]]
@@ -362,7 +362,7 @@ svint32_t test_svdup_n_s32_m(svint32_t inactive, svbool_t 
pg, int32_t op) MODE_A
 // CIR:       cir.call_llvm_intrinsic "aarch64.sve.dup" %{{.*}}, 
%[[CONVERT_PG]], %{{.*}} :
 // CIR-SAME:    (!cir.vector<[4] x !s32i>, !cir.vector<[4] x !cir.int<u, 1>>, 
!s32i) -> !cir.vector<[4] x !s32i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 16 x i1> 
[[PG:%.*]], i32{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 16 x i1> 
[[PG:%.*]], i32 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[PG_CONVERTED:%.*]] = call <vscale x 4 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG]])
 // LLVM_OGCG_CIR-NEXT:    [[RES:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> [[INACTIVE]], <vscale x 4 x 
i1> [[PG_CONVERTED]], i32 [[OP]])
 // LLVM_OGCG_CIR-NEXT:    ret <vscale x 4 x i32> [[RES]]
@@ -377,7 +377,7 @@ svint64_t test_svdup_n_s64_m(svint64_t inactive, svbool_t 
pg, int64_t op) MODE_A
 // CIR:       cir.call_llvm_intrinsic "aarch64.sve.dup" %{{.*}}, 
%[[CONVERT_PG]], %{{.*}} :
 // CIR-SAME:   (!cir.vector<[2] x !s64i>, !cir.vector<[2] x !cir.int<u, 1>>, 
!s64i) -> !cir.vector<[2] x !s64i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 16 x i1> 
[[PG:%.*]], i64{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 16 x i1> 
[[PG:%.*]], i64 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[PG_CONVERTED:%.*]] = call <vscale x 2 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG]])
 // LLVM_OGCG_CIR-NEXT:    [[RES:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> [[INACTIVE]], <vscale x 2 x 
i1> [[PG_CONVERTED]], i64 [[OP]])
 // LLVM_OGCG_CIR-NEXT:    ret <vscale x 2 x i64> [[RES]]
@@ -390,7 +390,7 @@ svuint8_t test_svdup_n_u8_m(svuint8_t inactive, svbool_t 
pg, uint8_t op) MODE_AT
 // CIR:       cir.call_llvm_intrinsic "aarch64.sve.dup" %{{.*}}, %{{.*}}, 
%{{.*}} :
 // CIR-SAME:   (!cir.vector<[16] x !u8i>, !cir.vector<[16] x !cir.int<u, 1>>, 
!u8i) -> !cir.vector<[16] x !u8i>
 
-// LLVM_OGCG_CIR-SAME: <vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> 
[[PG:%.*]], i8{{.*}} [[OP:%.*]])
+// LLVM_OGCG_CIR-SAME: <vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> 
[[PG:%.*]], i8 noundef [[OP:%.*]])
 // LLVM_OGCG_CIR:    [[PG_CONVERTED:%.*]] = call <vscale x 16 x i8> @llv...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/184401
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to