https://github.com/ArfiH updated 
https://github.com/llvm/llvm-project/pull/185992

>From 6bc372e7e3c1eedbc6ab25cbdb0a6cb03b7304a8 Mon Sep 17 00:00:00 2001
From: ArfiH <[email protected]>
Date: Wed, 11 Mar 2026 19:28:35 +0530
Subject: [PATCH] [CIR][AArch64] Implement vrshr_n_v and vrshrd_n_s64/u64 NEON
 builtins

Implement the following AArch64 NEON rounding right shift builtins in CIR:
- vrshr_n_v / vrshrq_n_v (vector variants, all element types)
- vrshrd_n_s64 / vrshrd_n_u64 (scalar 64-bit variants)

Part of issue #185382.
---
 .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp  |  67 +++++-
 clang/test/CodeGen/AArch64/neon-intrinsics.c  | 204 ------------------
 clang/test/CodeGen/AArch64/neon/intrinsics.c  | 152 +++++++++++++
 3 files changed, 211 insertions(+), 212 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 5534e69b5f8bc..58e10374765b5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -831,6 +831,25 @@ static cir::VectorType getNeonType(CIRGenFunction *cgf, 
NeonTypeFlags typeFlags,
   llvm_unreachable("Unknown vector element type!");
 }
 
+// Forward declarations of emitNeonCallToOp and emitNeonCall — their
+template <typename Operation>
+static mlir::Value
+emitNeonCallToOp(CIRGenModule &cgm, CIRGenBuilderTy &builder,
+                 llvm::SmallVector<mlir::Type> argTypes,
+                 llvm::SmallVectorImpl<mlir::Value> &args,
+                 std::optional<llvm::StringRef> intrinsicName,
+                 mlir::Type funcResTy, mlir::Location loc,
+                 bool isConstrainedFPIntrinsic = false, unsigned shift = 0,
+                 bool rightshift = false);
+
+static mlir::Value emitNeonCall(CIRGenModule &cgm, CIRGenBuilderTy &builder,
+                                llvm::SmallVector<mlir::Type> argTypes,
+                                llvm::SmallVectorImpl<mlir::Value> &args,
+                                llvm::StringRef intrinsicName,
+                                mlir::Type funcResTy, mlir::Location loc,
+                                bool isConstrainedFPIntrinsic = false,
+                                unsigned shift = 0, bool rightshift = false);
+
 static mlir::Value emitCommonNeonBuiltinExpr(
     CIRGenFunction &cgf, unsigned builtinID, unsigned llvmIntrinsic,
     unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier,
@@ -1047,8 +1066,6 @@ static mlir::Value emitCommonNeonBuiltinExpr(
   case NEON::BI__builtin_neon_vrsqrteq_v:
   case NEON::BI__builtin_neon_vrndi_v:
   case NEON::BI__builtin_neon_vrndiq_v:
-  case NEON::BI__builtin_neon_vrshr_n_v:
-  case NEON::BI__builtin_neon_vrshrq_n_v:
   case NEON::BI__builtin_neon_vsha512hq_u64:
   case NEON::BI__builtin_neon_vsha512h2q_u64:
   case NEON::BI__builtin_neon_vsha512su0q_u64:
@@ -1118,6 +1135,17 @@ static mlir::Value emitCommonNeonBuiltinExpr(
   case NEON::BI__builtin_neon_vbfdot_f32:
   case NEON::BI__builtin_neon_vbfdotq_f32:
   case NEON::BI__builtin_neon___a32_vcvt_bf16_f32:
+  case NEON::BI__builtin_neon_vrshr_n_v:
+  case NEON::BI__builtin_neon_vrshrq_n_v: {
+    // srshl/urshl are left-shift intrinsics; a negative shift performs a
+    // rounding right-shift. The shift amount is negated via rightshift=true.
+    bool isUnsigned = neonType.isUnsigned();
+    llvm::StringRef intrName =
+        isUnsigned ? "aarch64.neon.urshl" : "aarch64.neon.srshl";
+    return emitNeonCall(cgf.cgm, cgf.getBuilder(), {ty, ty}, ops, intrName, ty,
+                        loc, /*isConstrainedFP=*/false, /*shift=*/1,
+                        /*rightshift=*/true);
+  }
   default:
     cgf.cgm.errorNYI(expr->getSourceRange(),
                      std::string("unimplemented AArch64 builtin call: ") +
@@ -1306,8 +1334,8 @@ emitNeonCallToOp(CIRGenModule &cgm, CIRGenBuilderTy 
&builder,
                  llvm::SmallVectorImpl<mlir::Value> &args,
                  std::optional<llvm::StringRef> intrinsicName,
                  mlir::Type funcResTy, mlir::Location loc,
-                 bool isConstrainedFPIntrinsic = false, unsigned shift = 0,
-                 bool rightshift = false) {
+                 bool isConstrainedFPIntrinsic, unsigned shift,
+                 bool rightshift) {
   // TODO(cir): Consider removing the following unreachable when we have
   // emitConstrainedFPCall feature implemented
   assert(!cir::MissingFeatures::emitConstrainedFPCall());
@@ -1319,7 +1347,21 @@ emitNeonCallToOp(CIRGenModule &cgm, CIRGenBuilderTy 
&builder,
       assert(!cir::MissingFeatures::emitConstrainedFPCall());
     }
     if (shift > 0 && shift == j) {
-      cgm.errorNYI(loc, std::string("intrinsic requiring a shift Op"));
+      int sv = cast<cir::IntAttr>(
+                   cast<cir::ConstantOp>(args[j].getDefiningOp()).getValue())
+                   .getSInt();
+      // Create a scalar constant of the element type (with matching bitwidth).
+      // Then splat to a vector if needed.
+      auto vecTy = mlir::dyn_cast<cir::VectorType>(argTypes[j]);
+      mlir::Type eltTy = vecTy ? vecTy.getElementType() : argTypes[j];
+      unsigned width = mlir::cast<cir::IntType>(eltTy).getWidth();
+      llvm::APSInt shiftVal(width, /*isUnsigned=*/false);
+      shiftVal = rightshift ? -sv : sv;
+      mlir::Value scalarConst = cir::ConstantOp::create(
+          builder, loc, eltTy, cir::IntAttr::get(eltTy, shiftVal));
+      args[j] = vecTy
+                    ? cir::VecSplatOp::create(builder, loc, vecTy, scalarConst)
+                    : scalarConst;
     } else {
       args[j] = builder.createBitcast(args[j], argTypes[j]);
     }
@@ -1344,8 +1386,8 @@ static mlir::Value emitNeonCall(CIRGenModule &cgm, 
CIRGenBuilderTy &builder,
                                 llvm::SmallVectorImpl<mlir::Value> &args,
                                 llvm::StringRef intrinsicName,
                                 mlir::Type funcResTy, mlir::Location loc,
-                                bool isConstrainedFPIntrinsic = false,
-                                unsigned shift = 0, bool rightshift = false) {
+                                bool isConstrainedFPIntrinsic,
+                                unsigned shift, bool rightshift) {
   return emitNeonCallToOp<cir::LLVMIntrinsicCallOp>(
       cgm, builder, std::move(argTypes), args, intrinsicName, funcResTy, loc,
       isConstrainedFPIntrinsic, shift, rightshift);
@@ -2781,8 +2823,17 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned 
builtinID, const CallExpr *expr,
   case NEON::BI__builtin_neon_vqshlud_n_s64:
   case NEON::BI__builtin_neon_vqshld_n_u64:
   case NEON::BI__builtin_neon_vqshld_n_s64:
-  case NEON::BI__builtin_neon_vrshrd_n_u64:
   case NEON::BI__builtin_neon_vrshrd_n_s64:
+  case NEON::BI__builtin_neon_vrshrd_n_u64: {
+    // srshl/urshl are left-shift intrinsics; passing -n performs a rounding
+    // right-shift by n.
+    bool isSigned = builtinID == NEON::BI__builtin_neon_vrshrd_n_s64;
+    mlir::Value negShift = builder.createNeg(
+        builder.createIntCast(ops[1], builder.getSIntNTy(64)));
+    return builder.emitIntrinsicCallOp(
+        loc, isSigned ? "aarch64.neon.srshl" : "aarch64.neon.urshl",
+        convertType(expr->getType()), mlir::ValueRange{ops[0], negShift});
+  }
   case NEON::BI__builtin_neon_vrsrad_n_u64:
   case NEON::BI__builtin_neon_vrsrad_n_s64:
   case NEON::BI__builtin_neon_vshld_n_s64:
diff --git a/clang/test/CodeGen/AArch64/neon-intrinsics.c 
b/clang/test/CodeGen/AArch64/neon-intrinsics.c
index bfaea2b8ae909..8e0f673695be1 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics.c
@@ -7124,166 +7124,6 @@ uint64x2_t test_vsraq_n_u64(uint64x2_t a, uint64x2_t b) 
{
   return vsraq_n_u64(a, b, 3);
 }
 
-// CHECK-LABEL: define dso_local <8 x i8> @test_vrshr_n_s8(
-// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = call <8 x i8> 
@llvm.aarch64.neon.srshl.v8i8(<8 x i8> [[A]], <8 x i8> splat (i8 -3))
-// CHECK-NEXT:    ret <8 x i8> [[VRSHR_N]]
-//
-int8x8_t test_vrshr_n_s8(int8x8_t a) {
-  return vrshr_n_s8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vrshr_n_s16(
-// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <4 x i16> 
@llvm.aarch64.neon.srshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> splat (i16 -3))
-// CHECK-NEXT:    ret <4 x i16> [[VRSHR_N1]]
-//
-int16x4_t test_vrshr_n_s16(int16x4_t a) {
-  return vrshr_n_s16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vrshr_n_s32(
-// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <2 x i32> 
@llvm.aarch64.neon.srshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> splat (i32 -3))
-// CHECK-NEXT:    ret <2 x i32> [[VRSHR_N1]]
-//
-int32x2_t test_vrshr_n_s32(int32x2_t a) {
-  return vrshr_n_s32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vrshrq_n_s8(
-// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = call <16 x i8> 
@llvm.aarch64.neon.srshl.v16i8(<16 x i8> [[A]], <16 x i8> splat (i8 -3))
-// CHECK-NEXT:    ret <16 x i8> [[VRSHR_N]]
-//
-int8x16_t test_vrshrq_n_s8(int8x16_t a) {
-  return vrshrq_n_s8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vrshrq_n_s16(
-// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <8 x i16> 
@llvm.aarch64.neon.srshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> splat (i16 -3))
-// CHECK-NEXT:    ret <8 x i16> [[VRSHR_N1]]
-//
-int16x8_t test_vrshrq_n_s16(int16x8_t a) {
-  return vrshrq_n_s16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vrshrq_n_s32(
-// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <4 x i32> 
@llvm.aarch64.neon.srshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> splat (i32 -3))
-// CHECK-NEXT:    ret <4 x i32> [[VRSHR_N1]]
-//
-int32x4_t test_vrshrq_n_s32(int32x4_t a) {
-  return vrshrq_n_s32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i64> @test_vrshrq_n_s64(
-// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <2 x i64> 
@llvm.aarch64.neon.srshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> splat (i64 -3))
-// CHECK-NEXT:    ret <2 x i64> [[VRSHR_N1]]
-//
-int64x2_t test_vrshrq_n_s64(int64x2_t a) {
-  return vrshrq_n_s64(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <8 x i8> @test_vrshr_n_u8(
-// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = call <8 x i8> 
@llvm.aarch64.neon.urshl.v8i8(<8 x i8> [[A]], <8 x i8> splat (i8 -3))
-// CHECK-NEXT:    ret <8 x i8> [[VRSHR_N]]
-//
-uint8x8_t test_vrshr_n_u8(uint8x8_t a) {
-  return vrshr_n_u8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vrshr_n_u16(
-// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <4 x i16> 
@llvm.aarch64.neon.urshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> splat (i16 -3))
-// CHECK-NEXT:    ret <4 x i16> [[VRSHR_N1]]
-//
-uint16x4_t test_vrshr_n_u16(uint16x4_t a) {
-  return vrshr_n_u16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vrshr_n_u32(
-// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <2 x i32> 
@llvm.aarch64.neon.urshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> splat (i32 -3))
-// CHECK-NEXT:    ret <2 x i32> [[VRSHR_N1]]
-//
-uint32x2_t test_vrshr_n_u32(uint32x2_t a) {
-  return vrshr_n_u32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vrshrq_n_u8(
-// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = call <16 x i8> 
@llvm.aarch64.neon.urshl.v16i8(<16 x i8> [[A]], <16 x i8> splat (i8 -3))
-// CHECK-NEXT:    ret <16 x i8> [[VRSHR_N]]
-//
-uint8x16_t test_vrshrq_n_u8(uint8x16_t a) {
-  return vrshrq_n_u8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vrshrq_n_u16(
-// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <8 x i16> 
@llvm.aarch64.neon.urshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> splat (i16 -3))
-// CHECK-NEXT:    ret <8 x i16> [[VRSHR_N1]]
-//
-uint16x8_t test_vrshrq_n_u16(uint16x8_t a) {
-  return vrshrq_n_u16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vrshrq_n_u32(
-// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <4 x i32> 
@llvm.aarch64.neon.urshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> splat (i32 -3))
-// CHECK-NEXT:    ret <4 x i32> [[VRSHR_N1]]
-//
-uint32x4_t test_vrshrq_n_u32(uint32x4_t a) {
-  return vrshrq_n_u32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i64> @test_vrshrq_n_u64(
-// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <2 x i64> 
@llvm.aarch64.neon.urshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> splat (i64 -3))
-// CHECK-NEXT:    ret <2 x i64> [[VRSHR_N1]]
-//
-uint64x2_t test_vrshrq_n_u64(uint64x2_t a) {
-  return vrshrq_n_u64(a, 3);
-}
-
 // CHECK-LABEL: define dso_local <8 x i8> @test_vrsra_n_s8(
 // CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) 
#[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
@@ -17730,50 +17570,6 @@ uint64x1_t test_vshr_n_u64(uint64x1_t a) {
   return vshr_n_u64(a, 1);
 }
 
-// CHECK-LABEL: define dso_local i64 @test_vrshrd_n_s64(
-// CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 
[[A]], i64 -63)
-// CHECK-NEXT:    ret i64 [[VRSHR_N]]
-//
-int64_t test_vrshrd_n_s64(int64_t a) {
-  return (int64_t)vrshrd_n_s64(a, 63);
-}
-
-// CHECK-LABEL: define dso_local <1 x i64> @test_vrshr_n_s64(
-// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <1 x i64> 
@llvm.aarch64.neon.srshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> splat (i64 -1))
-// CHECK-NEXT:    ret <1 x i64> [[VRSHR_N1]]
-//
-int64x1_t test_vrshr_n_s64(int64x1_t a) {
-  return vrshr_n_s64(a, 1);
-}
-
-// CHECK-LABEL: define dso_local i64 @test_vrshrd_n_u64(
-// CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 
[[A]], i64 -63)
-// CHECK-NEXT:    ret i64 [[VRSHR_N]]
-//
-uint64_t test_vrshrd_n_u64(uint64_t a) {
-  return (uint64_t)vrshrd_n_u64(a, 63);
-}
-
-// CHECK-LABEL: define dso_local <1 x i64> @test_vrshr_n_u64(
-// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <1 x i64> 
@llvm.aarch64.neon.urshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> splat (i64 -1))
-// CHECK-NEXT:    ret <1 x i64> [[VRSHR_N1]]
-//
-uint64x1_t test_vrshr_n_u64(uint64x1_t a) {
-  return vrshr_n_u64(a, 1);
-}
-
 // CHECK-LABEL: define dso_local i64 @test_vsrad_n_s64(
 // CHECK-SAME: i64 noundef [[A:%.*]], i64 noundef [[B:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c 
b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index b740c3b5b2310..a04305dade0ab 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -936,3 +936,155 @@ uint32x4_t test_vabaq_u32(uint32x4_t v1, uint32x4_t v2, 
uint32x4_t v3) {
 // LLVM-NEXT:    ret <4 x i32> [[ADD_I]]
   return vabaq_u32(v1, v2, v3);
 }
+
+//===----------------------------------------------------------------------===//
+// Vector rounding shift right
+// 
https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#vector-rounding-shift-right
+//===----------------------------------------------------------------------===//
+
+// ALL-LABEL: @test_vrshr_n_s8(
+int8x8_t test_vrshr_n_s8(int8x8_t a) {
+// CIR: cir.vec.splat {{%.*}} : !s8i, !cir.vector<8 x !s8i>
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x 
i8> {{.*}}, <8 x i8> splat (i8 -3))
+// LLVM: ret <8 x i8> [[VRSHR_N]]
+  return vrshr_n_s8(a, 3);
+}
+
+// ALL-LABEL: @test_vrshr_n_s16(
+int16x4_t test_vrshr_n_s16(int16x4_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x 
i16> {{.*}}, <4 x i16> splat (i16 -3))
+// LLVM: ret <4 x i16> [[VRSHR_N1]]
+  return vrshr_n_s16(a, 3);
+}
+
+// ALL-LABEL: @test_vrshr_n_s32(
+int32x2_t test_vrshr_n_s32(int32x2_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x 
i32> {{.*}}, <2 x i32> splat (i32 -3))
+// LLVM: ret <2 x i32> [[VRSHR_N1]]
+  return vrshr_n_s32(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_s8(
+int8x16_t test_vrshrq_n_s8(int8x16_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x 
i8> {{.*}}, <16 x i8> splat (i8 -3))
+// LLVM: ret <16 x i8> [[VRSHR_N]]
+  return vrshrq_n_s8(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_s16(
+int16x8_t test_vrshrq_n_s16(int16x8_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x 
i16> {{.*}}, <8 x i16> splat (i16 -3))
+// LLVM: ret <8 x i16> [[VRSHR_N1]]
+  return vrshrq_n_s16(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_s32(
+int32x4_t test_vrshrq_n_s32(int32x4_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x 
i32> {{.*}}, <4 x i32> splat (i32 -3))
+// LLVM: ret <4 x i32> [[VRSHR_N1]]
+  return vrshrq_n_s32(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_s64(
+int64x2_t test_vrshrq_n_s64(int64x2_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x 
i64> {{.*}}, <2 x i64> splat (i64 -3))
+// LLVM: ret <2 x i64> [[VRSHR_N1]]
+  return vrshrq_n_s64(a, 3);
+}
+
+// ALL-LABEL: @test_vrshr_n_u8(
+uint8x8_t test_vrshr_n_u8(uint8x8_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x 
i8> {{.*}}, <8 x i8> splat (i8 -3))
+// LLVM: ret <8 x i8> [[VRSHR_N]]
+  return vrshr_n_u8(a, 3);
+}
+
+// ALL-LABEL: @test_vrshr_n_u16(
+uint16x4_t test_vrshr_n_u16(uint16x4_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x 
i16> {{.*}}, <4 x i16> splat (i16 -3))
+// LLVM: ret <4 x i16> [[VRSHR_N1]]
+  return vrshr_n_u16(a, 3);
+}
+
+// ALL-LABEL: @test_vrshr_n_u32(
+uint32x2_t test_vrshr_n_u32(uint32x2_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x 
i32> {{.*}}, <2 x i32> splat (i32 -3))
+// LLVM: ret <2 x i32> [[VRSHR_N1]]
+  return vrshr_n_u32(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_u8(
+uint8x16_t test_vrshrq_n_u8(uint8x16_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x 
i8> {{.*}}, <16 x i8> splat (i8 -3))
+// LLVM: ret <16 x i8> [[VRSHR_N]]
+  return vrshrq_n_u8(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_u16(
+uint16x8_t test_vrshrq_n_u16(uint16x8_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x 
i16> {{.*}}, <8 x i16> splat (i16 -3))
+// LLVM: ret <8 x i16> [[VRSHR_N1]]
+  return vrshrq_n_u16(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_u32(
+uint32x4_t test_vrshrq_n_u32(uint32x4_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x 
i32> {{.*}}, <4 x i32> splat (i32 -3))
+// LLVM: ret <4 x i32> [[VRSHR_N1]]
+  return vrshrq_n_u32(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_u64(
+uint64x2_t test_vrshrq_n_u64(uint64x2_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x 
i64> {{.*}}, <2 x i64> splat (i64 -3))
+// LLVM: ret <2 x i64> [[VRSHR_N1]]
+  return vrshrq_n_u64(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrd_n_s64(
+int64_t test_vrshrd_n_s64(int64_t a) {
+// CIR: cir.const #cir.int<{{.*}}> : !s64i
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 {{.*}}, 
i64 -63)
+// LLVM: ret i64 [[VRSHR_N]]
+  return (int64_t)vrshrd_n_s64(a, 63);
+}
+
+// ALL-LABEL: @test_vrshr_n_s64(
+int64x1_t test_vrshr_n_s64(int64x1_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x 
i64> {{.*}}, <1 x i64> splat (i64 -1))
+// LLVM: ret <1 x i64> [[VRSHR_N1]]
+  return vrshr_n_s64(a, 1);
+}
+
+// ALL-LABEL: @test_vrshrd_n_u64(
+uint64_t test_vrshrd_n_u64(uint64_t a) {
+// CIR: cir.const #cir.int<{{.*}}> : !s64i
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 {{.*}}, 
i64 -63)
+// LLVM: ret i64 [[VRSHR_N]]
+  return (uint64_t)vrshrd_n_u64(a, 63);
+}
+
+// ALL-LABEL: @test_vrshr_n_u64(
+uint64x1_t test_vrshr_n_u64(uint64x1_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x 
i64> {{.*}}, <1 x i64> splat (i64 -1))
+// LLVM: ret <1 x i64> [[VRSHR_N1]]
+  return vrshr_n_u64(a, 1);
+}

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to