Author: Sander de Smalen
Date: 2020-05-07T13:31:46+01:00
New Revision: 5fa0eeec6eb1d1f6946d3e6ee2455e95bb79d870
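The patch below adds SVE2 ACLE builtins for several shift operations (svqshlu, svrshr, svrsra, svsli, svsra, svsri). As a quick orientation before the diff, here is a minimal usage sketch of two of the new intrinsics. It is illustrative only, not part of the commit, and assumes a toolchain with SVE2 support (e.g. clang with -march=armv8-a+sve2); it uses the non-overloaded _n_ forms that the new tests exercise.

// Illustrative sketch only (not part of this patch); assumes <arm_sve.h>
// from an SVE2-enabled toolchain, e.g. clang -march=armv8-a+sve2.
#include <arm_sve.h>

// Rounding shift right by 1 on active lanes, roughly (x + 1) >> 1;
// the _m form merges, so inactive lanes keep the value of x.
svint32_t rounding_halve(svbool_t pg, svint32_t x) {
  return svrshr_n_s32_m(pg, x, 1);   // immediate must be in [1, 32] for 32-bit elements
}

// Unpredicated shift-right-accumulate: acc + (x >> 4).
svuint16_t sra_by_4(svuint16_t acc, svuint16_t x) {
  return svsra_n_u16(acc, x, 4);     // immediate must be in [1, 16] for 16-bit elements
}

In both cases the shift amount must be a compile-time constant within the per-element-width range enforced by the ImmCheckShiftLeft/ImmCheckShiftRight checks added in this patch.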
URL: https://github.com/llvm/llvm-project/commit/5fa0eeec6eb1d1f6946d3e6ee2455e95bb79d870
DIFF: https://github.com/llvm/llvm-project/commit/5fa0eeec6eb1d1f6946d3e6ee2455e95bb79d870.diff

LOG: [SveEmitter] Add more SVE2 builtins for shift operations

This patch adds builtins for:
- svqshlu
- svrshr
- svrsra
- svsli
- svsra
- svsri

Added: 
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_rshr.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_rsra.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sli.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sra.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sri.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_rshr.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_rsra.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sli.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sra.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sri.c

Modified: 
    clang/include/clang/Basic/arm_sve.td
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qshlu.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qshlu.c

Removed: 
    

################################################################################
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 5effceb93464..858b0eecbcb1 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -811,12 +811,6 @@ let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
 def SVSHRNB : SInst<"svshrnb[_n_{d}]", "hdi", "silUsUiUl", MergeNone, "aarch64_sve_shrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
 }
 
-////////////////////////////////////////////////////////////////////////////////
-// SVE2 - Uniform DSP operations
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVQSHLU_M : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeOp1, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // While comparisons
 
@@ -1295,6 +1289,22 @@ defm SVRSHL_S : SInstZPZxZ<"svrshl", "csil", "dPdx", "dPdK", "aarch64_sve_
 defm SVRSHL_U : SInstZPZxZ<"svrshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_urshl">;
 defm SVSQADD : SInstZPZxZ<"svsqadd", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_usqadd">;
 defm SVUQADD : SInstZPZxZ<"svuqadd", "csil", "dPdu", "dPdL", "aarch64_sve_suqadd">;
+
+def SVQSHLU_M : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeOp1, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVQSHLU_X : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeAny, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVQSHLU_Z : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeZero, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVRSHR_M_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeOp1, "aarch64_sve_srshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_M_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeOp1, "aarch64_sve_urshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_X_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeAny, "aarch64_sve_srshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_X_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeAny, "aarch64_sve_urshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_Z_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeZero, "aarch64_sve_srshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_Z_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeZero, "aarch64_sve_urshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSRA_S : SInst<"svrsra[_n_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_srsra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSRA_U : SInst<"svrsra[_n_{d}]", "dddi", "UcUsUiUl", MergeNone, "aarch64_sve_ursra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVSLI : SInst<"svsli[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_sli", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVSRA_S : SInst<"svsra[_n_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_ssra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVSRA_U : SInst<"svsra[_n_{d}]", "dddi", "UcUsUiUl", MergeNone, "aarch64_sve_usra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVSRI : SInst<"svsri[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_sri", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
 }
 ////////////////////////////////////////////////////////////////////////////////
 // SVE2 - Non-temporal gather/scatter
diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qshlu.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qshlu.c
index 69a260764d39..d61959573632 100644
--- a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qshlu.c
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qshlu.c
@@ -12,6 +12,100 @@
 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
 #endif
 
+svuint8_t test_svqshlu_n_s8_z(svbool_t pg, svint8_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s8_z
+  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshlu.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], i32 0)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqshlu_z'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s8_z'}}
+  return SVE_ACLE_FUNC(svqshlu,_n_s8,_z,)(pg, op1, 0);
+}
+
+svuint8_t test_svqshlu_n_s8_z_1(svbool_t pg, svint8_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s8_z_1
+  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshlu.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], i32 7)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqshlu_z'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s8_z'}}
+  return SVE_ACLE_FUNC(svqshlu,_n_s8,_z,)(pg, op1, 7);
+}
+
+svuint16_t test_svqshlu_n_s16_z(svbool_t pg, svint16_t op1)
+{
+  // CHECK-LABEL: test_svqshlu_n_s16_z
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshlu.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], i32 0)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqshlu_z'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s16_z'}}
+  return SVE_ACLE_FUNC(svqshlu,_n_s16,_z,)(pg, op1, 0);
+}
+
+svuint16_t test_svqshlu_n_s16_z_1(svbool_t pg, svint16_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s16_z_1 + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshlu.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], i32 15) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_z'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s16_z'}} + return SVE_ACLE_FUNC(svqshlu,_n_s16,_z,)(pg, op1, 15); +} + +svuint32_t test_svqshlu_n_s32_z(svbool_t pg, svint32_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s32_z + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshlu.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], i32 0) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_z'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s32_z'}} + return SVE_ACLE_FUNC(svqshlu,_n_s32,_z,)(pg, op1, 0); +} + +svuint32_t test_svqshlu_n_s32_z_1(svbool_t pg, svint32_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s32_z_1 + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshlu.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], i32 31) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_z'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s32_z'}} + return SVE_ACLE_FUNC(svqshlu,_n_s32,_z,)(pg, op1, 31); +} + +svuint64_t test_svqshlu_n_s64_z(svbool_t pg, svint64_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s64_z + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqshlu.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], i32 0) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_z'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s64_z'}} + return SVE_ACLE_FUNC(svqshlu,_n_s64,_z,)(pg, op1, 0); +} + +svuint64_t test_svqshlu_n_s64_z_1(svbool_t pg, svint64_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s64_z_1 + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> 
%[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqshlu.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], i32 63) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_z'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s64_z'}} + return SVE_ACLE_FUNC(svqshlu,_n_s64,_z,)(pg, op1, 63); +} + svuint8_t test_svqshlu_n_s8_m(svbool_t pg, svint8_t op1) { // CHECK-LABEL: test_svqshlu_n_s8_m @@ -97,3 +191,89 @@ svuint64_t test_svqshlu_n_s64_m_1(svbool_t pg, svint64_t op1) // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s64_m'}} return SVE_ACLE_FUNC(svqshlu,_n_s64,_m,)(pg, op1, 63); } + +svuint8_t test_svqshlu_n_s8_x(svbool_t pg, svint8_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s8_x + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshlu.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 0) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_x'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s8_x'}} + return SVE_ACLE_FUNC(svqshlu,_n_s8,_x,)(pg, op1, 0); +} + +svuint8_t test_svqshlu_n_s8_x_1(svbool_t pg, svint8_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s8_x_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshlu.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 7) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_x'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s8_x'}} + return SVE_ACLE_FUNC(svqshlu,_n_s8,_x,)(pg, op1, 7); +} + +svuint16_t test_svqshlu_n_s16_x(svbool_t pg, svint16_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s16_x + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshlu.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, i32 0) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_x'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s16_x'}} + return SVE_ACLE_FUNC(svqshlu,_n_s16,_x,)(pg, op1, 0); +} + +svuint16_t test_svqshlu_n_s16_x_1(svbool_t pg, svint16_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s16_x_1 + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshlu.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, i32 15) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_x'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s16_x'}} + return SVE_ACLE_FUNC(svqshlu,_n_s16,_x,)(pg, op1, 15); +} + +svuint32_t test_svqshlu_n_s32_x(svbool_t pg, svint32_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s32_x + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshlu.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, i32 0) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit 
declaration of function 'svqshlu_x'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s32_x'}} + return SVE_ACLE_FUNC(svqshlu,_n_s32,_x,)(pg, op1, 0); +} + +svuint32_t test_svqshlu_n_s32_x_1(svbool_t pg, svint32_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s32_x_1 + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshlu.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, i32 31) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_x'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s32_x'}} + return SVE_ACLE_FUNC(svqshlu,_n_s32,_x,)(pg, op1, 31); +} + +svuint64_t test_svqshlu_n_s64_x(svbool_t pg, svint64_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s64_x + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqshlu.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, i32 0) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_x'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s64_x'}} + return SVE_ACLE_FUNC(svqshlu,_n_s64,_x,)(pg, op1, 0); +} + +svuint64_t test_svqshlu_n_s64_x_1(svbool_t pg, svint64_t op1) +{ + // CHECK-LABEL: test_svqshlu_n_s64_x_1 + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqshlu.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, i32 63) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqshlu_x'}} + // expected-warning@+1 {{implicit declaration of function 'svqshlu_n_s64_x'}} + return SVE_ACLE_FUNC(svqshlu,_n_s64,_x,)(pg, op1, 63); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_rshr.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_rshr.c new file mode 100644 index 000000000000..be4624141753 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_rshr.c @@ -0,0 +1,545 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svrshr_n_s8_z(svbool_t pg, svint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s8_z + // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.srshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s8_z'}} + return SVE_ACLE_FUNC(svrshr,_n_s8,_z,)(pg, op1, 1); +} + +svint8_t test_svrshr_n_s8_z_1(svbool_t pg, svint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s8_z_1 + // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.srshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s8_z'}} + return SVE_ACLE_FUNC(svrshr,_n_s8,_z,)(pg, op1, 8); +} + +svint16_t test_svrshr_n_s16_z(svbool_t pg, svint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s16_z + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.srshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s16_z'}} + return SVE_ACLE_FUNC(svrshr,_n_s16,_z,)(pg, op1, 1); +} + +svint16_t test_svrshr_n_s16_z_1(svbool_t pg, svint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s16_z_1 + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.srshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s16_z'}} + return SVE_ACLE_FUNC(svrshr,_n_s16,_z,)(pg, op1, 16); +} + +svint32_t test_svrshr_n_s32_z(svbool_t pg, svint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s32_z + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.srshr.nxv4i32(<vscale x 4 
x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s32_z'}} + return SVE_ACLE_FUNC(svrshr,_n_s32,_z,)(pg, op1, 1); +} + +svint32_t test_svrshr_n_s32_z_1(svbool_t pg, svint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s32_z_1 + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.srshr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s32_z'}} + return SVE_ACLE_FUNC(svrshr,_n_s32,_z,)(pg, op1, 32); +} + +svint64_t test_svrshr_n_s64_z(svbool_t pg, svint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s64_z + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.srshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s64_z'}} + return SVE_ACLE_FUNC(svrshr,_n_s64,_z,)(pg, op1, 1); +} + +svint64_t test_svrshr_n_s64_z_1(svbool_t pg, svint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s64_z_1 + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.srshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s64_z'}} + return SVE_ACLE_FUNC(svrshr,_n_s64,_z,)(pg, op1, 64); +} + +svuint8_t test_svrshr_n_u8_z(svbool_t pg, svuint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u8_z + // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.urshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u8_z'}} + return SVE_ACLE_FUNC(svrshr,_n_u8,_z,)(pg, op1, 1); +} + +svuint8_t test_svrshr_n_u8_z_1(svbool_t pg, svuint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u8_z_1 + // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.urshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u8_z'}} + return SVE_ACLE_FUNC(svrshr,_n_u8,_z,)(pg, op1, 8); +} + +svuint16_t test_svrshr_n_u16_z(svbool_t pg, svuint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u16_z + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.urshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u16_z'}} + return SVE_ACLE_FUNC(svrshr,_n_u16,_z,)(pg, op1, 1); +} + +svuint16_t test_svrshr_n_u16_z_1(svbool_t pg, svuint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u16_z_1 + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.urshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u16_z'}} + return SVE_ACLE_FUNC(svrshr,_n_u16,_z,)(pg, op1, 16); +} + +svuint32_t test_svrshr_n_u32_z(svbool_t pg, svuint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u32_z + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.urshr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u32_z'}} + return SVE_ACLE_FUNC(svrshr,_n_u32,_z,)(pg, op1, 1); +} + +svuint32_t test_svrshr_n_u32_z_1(svbool_t pg, svuint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u32_z_1 + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.urshr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 
{{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u32_z'}} + return SVE_ACLE_FUNC(svrshr,_n_u32,_z,)(pg, op1, 32); +} + +svuint64_t test_svrshr_n_u64_z(svbool_t pg, svuint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u64_z + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.urshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u64_z'}} + return SVE_ACLE_FUNC(svrshr,_n_u64,_z,)(pg, op1, 1); +} + +svuint64_t test_svrshr_n_u64_z_1(svbool_t pg, svuint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u64_z_1 + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.urshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_z'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u64_z'}} + return SVE_ACLE_FUNC(svrshr,_n_u64,_z,)(pg, op1, 64); +} + +svint8_t test_svrshr_n_s8_m(svbool_t pg, svint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s8_m + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.srshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s8_m'}} + return SVE_ACLE_FUNC(svrshr,_n_s8,_m,)(pg, op1, 1); +} + +svint8_t test_svrshr_n_s8_m_1(svbool_t pg, svint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s8_m_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.srshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s8_m'}} + return SVE_ACLE_FUNC(svrshr,_n_s8,_m,)(pg, op1, 8); +} + +svint16_t test_svrshr_n_s16_m(svbool_t pg, svint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s16_m + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.srshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s16_m'}} + return SVE_ACLE_FUNC(svrshr,_n_s16,_m,)(pg, op1, 1); +} + +svint16_t test_svrshr_n_s16_m_1(svbool_t pg, svint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s16_m_1 + // CHECK: %[[PG:.*]] = 
call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.srshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s16_m'}} + return SVE_ACLE_FUNC(svrshr,_n_s16,_m,)(pg, op1, 16); +} + +svint32_t test_svrshr_n_s32_m(svbool_t pg, svint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s32_m + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.srshr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s32_m'}} + return SVE_ACLE_FUNC(svrshr,_n_s32,_m,)(pg, op1, 1); +} + +svint32_t test_svrshr_n_s32_m_1(svbool_t pg, svint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s32_m_1 + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.srshr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s32_m'}} + return SVE_ACLE_FUNC(svrshr,_n_s32,_m,)(pg, op1, 32); +} + +svint64_t test_svrshr_n_s64_m(svbool_t pg, svint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s64_m + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.srshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s64_m'}} + return SVE_ACLE_FUNC(svrshr,_n_s64,_m,)(pg, op1, 1); +} + +svint64_t test_svrshr_n_s64_m_1(svbool_t pg, svint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s64_m_1 + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.srshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s64_m'}} + return SVE_ACLE_FUNC(svrshr,_n_s64,_m,)(pg, op1, 64); +} + +svuint8_t test_svrshr_n_u8_m(svbool_t pg, svuint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u8_m + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.urshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u8_m'}} + return SVE_ACLE_FUNC(svrshr,_n_u8,_m,)(pg, op1, 1); +} + +svuint8_t 
test_svrshr_n_u8_m_1(svbool_t pg, svuint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u8_m_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.urshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u8_m'}} + return SVE_ACLE_FUNC(svrshr,_n_u8,_m,)(pg, op1, 8); +} + +svuint16_t test_svrshr_n_u16_m(svbool_t pg, svuint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u16_m + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.urshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u16_m'}} + return SVE_ACLE_FUNC(svrshr,_n_u16,_m,)(pg, op1, 1); +} + +svuint16_t test_svrshr_n_u16_m_1(svbool_t pg, svuint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u16_m_1 + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.urshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u16_m'}} + return SVE_ACLE_FUNC(svrshr,_n_u16,_m,)(pg, op1, 16); +} + +svuint32_t test_svrshr_n_u32_m(svbool_t pg, svuint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u32_m + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.urshr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u32_m'}} + return SVE_ACLE_FUNC(svrshr,_n_u32,_m,)(pg, op1, 1); +} + +svuint32_t test_svrshr_n_u32_m_1(svbool_t pg, svuint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u32_m_1 + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.urshr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u32_m'}} + return SVE_ACLE_FUNC(svrshr,_n_u32,_m,)(pg, op1, 32); +} + +svuint64_t test_svrshr_n_u64_m(svbool_t pg, svuint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u64_m + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.urshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit 
declaration of function 'svrshr_n_u64_m'}} + return SVE_ACLE_FUNC(svrshr,_n_u64,_m,)(pg, op1, 1); +} + +svuint64_t test_svrshr_n_u64_m_1(svbool_t pg, svuint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u64_m_1 + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.urshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_m'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u64_m'}} + return SVE_ACLE_FUNC(svrshr,_n_u64,_m,)(pg, op1, 64); +} + +svint8_t test_svrshr_n_s8_x(svbool_t pg, svint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s8_x + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.srshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s8_x'}} + return SVE_ACLE_FUNC(svrshr,_n_s8,_x,)(pg, op1, 1); +} + +svint8_t test_svrshr_n_s8_x_1(svbool_t pg, svint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s8_x_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.srshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s8_x'}} + return SVE_ACLE_FUNC(svrshr,_n_s8,_x,)(pg, op1, 8); +} + +svint16_t test_svrshr_n_s16_x(svbool_t pg, svint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s16_x + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.srshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s16_x'}} + return SVE_ACLE_FUNC(svrshr,_n_s16,_x,)(pg, op1, 1); +} + +svint16_t test_svrshr_n_s16_x_1(svbool_t pg, svint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s16_x_1 + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.srshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s16_x'}} + return SVE_ACLE_FUNC(svrshr,_n_s16,_x,)(pg, op1, 16); +} + +svint32_t test_svrshr_n_s32_x(svbool_t pg, svint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s32_x + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.srshr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 
'svrshr_n_s32_x'}} + return SVE_ACLE_FUNC(svrshr,_n_s32,_x,)(pg, op1, 1); +} + +svint32_t test_svrshr_n_s32_x_1(svbool_t pg, svint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s32_x_1 + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.srshr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s32_x'}} + return SVE_ACLE_FUNC(svrshr,_n_s32,_x,)(pg, op1, 32); +} + +svint64_t test_svrshr_n_s64_x(svbool_t pg, svint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s64_x + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.srshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s64_x'}} + return SVE_ACLE_FUNC(svrshr,_n_s64,_x,)(pg, op1, 1); +} + +svint64_t test_svrshr_n_s64_x_1(svbool_t pg, svint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_s64_x_1 + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.srshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_s64_x'}} + return SVE_ACLE_FUNC(svrshr,_n_s64,_x,)(pg, op1, 64); +} + +svuint8_t test_svrshr_n_u8_x(svbool_t pg, svuint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u8_x + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.urshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u8_x'}} + return SVE_ACLE_FUNC(svrshr,_n_u8,_x,)(pg, op1, 1); +} + +svuint8_t test_svrshr_n_u8_x_1(svbool_t pg, svuint8_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u8_x_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.urshr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u8_x'}} + return SVE_ACLE_FUNC(svrshr,_n_u8,_x,)(pg, op1, 8); +} + +svuint16_t test_svrshr_n_u16_x(svbool_t pg, svuint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u16_x + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.urshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u16_x'}} + return 
SVE_ACLE_FUNC(svrshr,_n_u16,_x,)(pg, op1, 1); +} + +svuint16_t test_svrshr_n_u16_x_1(svbool_t pg, svuint16_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u16_x_1 + // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.urshr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u16_x'}} + return SVE_ACLE_FUNC(svrshr,_n_u16,_x,)(pg, op1, 16); +} + +svuint32_t test_svrshr_n_u32_x(svbool_t pg, svuint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u32_x + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.urshr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u32_x'}} + return SVE_ACLE_FUNC(svrshr,_n_u32,_x,)(pg, op1, 1); +} + +svuint32_t test_svrshr_n_u32_x_1(svbool_t pg, svuint32_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u32_x_1 + // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.urshr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u32_x'}} + return SVE_ACLE_FUNC(svrshr,_n_u32,_x,)(pg, op1, 32); +} + +svuint64_t test_svrshr_n_u64_x(svbool_t pg, svuint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u64_x + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.urshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u64_x'}} + return SVE_ACLE_FUNC(svrshr,_n_u64,_x,)(pg, op1, 1); +} + +svuint64_t test_svrshr_n_u64_x_1(svbool_t pg, svuint64_t op1) +{ + // CHECK-LABEL: test_svrshr_n_u64_x_1 + // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.urshr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrshr_x'}} + // expected-warning@+1 {{implicit declaration of function 'svrshr_n_u64_x'}} + return SVE_ACLE_FUNC(svrshr,_n_u64,_x,)(pg, op1, 64); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_rsra.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_rsra.c new file mode 100644 index 000000000000..9e42b7603231 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_rsra.c @@ -0,0 +1,173 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 
-triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svrsra_n_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svrsra_n_s8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.srsra.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_s8'}} + return SVE_ACLE_FUNC(svrsra,_n_s8,,)(op1, op2, 1); +} + +svint8_t test_svrsra_n_s8_1(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svrsra_n_s8_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.srsra.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_s8'}} + return SVE_ACLE_FUNC(svrsra,_n_s8,,)(op1, op2, 8); +} + +svint16_t test_svrsra_n_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svrsra_n_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.srsra.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_s16'}} + return SVE_ACLE_FUNC(svrsra,_n_s16,,)(op1, op2, 1); +} + +svint16_t test_svrsra_n_s16_1(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svrsra_n_s16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.srsra.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_s16'}} + return SVE_ACLE_FUNC(svrsra,_n_s16,,)(op1, op2, 16); +} + +svint32_t test_svrsra_n_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svrsra_n_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.srsra.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_s32'}} + return SVE_ACLE_FUNC(svrsra,_n_s32,,)(op1, op2, 1); +} + +svint32_t 
test_svrsra_n_s32_1(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svrsra_n_s32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.srsra.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_s32'}} + return SVE_ACLE_FUNC(svrsra,_n_s32,,)(op1, op2, 32); +} + +svint64_t test_svrsra_n_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svrsra_n_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.srsra.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_s64'}} + return SVE_ACLE_FUNC(svrsra,_n_s64,,)(op1, op2, 1); +} + +svint64_t test_svrsra_n_s64_1(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svrsra_n_s64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.srsra.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_s64'}} + return SVE_ACLE_FUNC(svrsra,_n_s64,,)(op1, op2, 64); +} + +svuint8_t test_svrsra_n_u8(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svrsra_n_u8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ursra.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_u8'}} + return SVE_ACLE_FUNC(svrsra,_n_u8,,)(op1, op2, 1); +} + +svuint8_t test_svrsra_n_u8_1(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svrsra_n_u8_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ursra.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_u8'}} + return SVE_ACLE_FUNC(svrsra,_n_u8,,)(op1, op2, 8); +} + +svuint16_t test_svrsra_n_u16(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svrsra_n_u16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ursra.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_u16'}} + return SVE_ACLE_FUNC(svrsra,_n_u16,,)(op1, op2, 1); +} + +svuint16_t test_svrsra_n_u16_1(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svrsra_n_u16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ursra.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_u16'}} + return SVE_ACLE_FUNC(svrsra,_n_u16,,)(op1, op2, 16); +} + +svuint32_t test_svrsra_n_u32(svuint32_t op1, 
svuint32_t op2) +{ + // CHECK-LABEL: test_svrsra_n_u32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ursra.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_u32'}} + return SVE_ACLE_FUNC(svrsra,_n_u32,,)(op1, op2, 1); +} + +svuint32_t test_svrsra_n_u32_1(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svrsra_n_u32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ursra.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_u32'}} + return SVE_ACLE_FUNC(svrsra,_n_u32,,)(op1, op2, 32); +} + +svuint64_t test_svrsra_n_u64(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svrsra_n_u64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ursra.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_u64'}} + return SVE_ACLE_FUNC(svrsra,_n_u64,,)(op1, op2, 1); +} + +svuint64_t test_svrsra_n_u64_1(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svrsra_n_u64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ursra.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svrsra'}} + // expected-warning@+1 {{implicit declaration of function 'svrsra_n_u64'}} + return SVE_ACLE_FUNC(svrsra,_n_u64,,)(op1, op2, 64); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sli.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sli.c new file mode 100644 index 000000000000..e25c9abf6e1d --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sli.c @@ -0,0 +1,173 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svsli_n_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svsli_n_s8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sli.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 0) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_s8'}} + return SVE_ACLE_FUNC(svsli,_n_s8,,)(op1, op2, 0); +} + +svint8_t test_svsli_n_s8_1(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svsli_n_s8_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sli.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 7) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_s8'}} + return SVE_ACLE_FUNC(svsli,_n_s8,,)(op1, op2, 7); +} + +svint16_t test_svsli_n_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svsli_n_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sli.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_s16'}} + return SVE_ACLE_FUNC(svsli,_n_s16,,)(op1, op2, 0); +} + +svint16_t test_svsli_n_s16_1(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svsli_n_s16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sli.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 15) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_s16'}} + return SVE_ACLE_FUNC(svsli,_n_s16,,)(op1, op2, 15); +} + +svint32_t test_svsli_n_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svsli_n_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sli.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_s32'}} + return SVE_ACLE_FUNC(svsli,_n_s32,,)(op1, op2, 0); +} + +svint32_t test_svsli_n_s32_1(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svsli_n_s32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sli.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 31) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_s32'}} + return SVE_ACLE_FUNC(svsli,_n_s32,,)(op1, op2, 31); +} + +svint64_t test_svsli_n_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svsli_n_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sli.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 0) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_s64'}} + return 
SVE_ACLE_FUNC(svsli,_n_s64,,)(op1, op2, 0); +} + +svint64_t test_svsli_n_s64_1(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svsli_n_s64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sli.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 63) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_s64'}} + return SVE_ACLE_FUNC(svsli,_n_s64,,)(op1, op2, 63); +} + +svuint8_t test_svsli_n_u8(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svsli_n_u8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sli.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 0) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_u8'}} + return SVE_ACLE_FUNC(svsli,_n_u8,,)(op1, op2, 0); +} + +svuint8_t test_svsli_n_u8_1(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svsli_n_u8_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sli.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 7) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_u8'}} + return SVE_ACLE_FUNC(svsli,_n_u8,,)(op1, op2, 7); +} + +svuint16_t test_svsli_n_u16(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svsli_n_u16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sli.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_u16'}} + return SVE_ACLE_FUNC(svsli,_n_u16,,)(op1, op2, 0); +} + +svuint16_t test_svsli_n_u16_1(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svsli_n_u16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sli.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 15) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_u16'}} + return SVE_ACLE_FUNC(svsli,_n_u16,,)(op1, op2, 15); +} + +svuint32_t test_svsli_n_u32(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svsli_n_u32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sli.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_u32'}} + return SVE_ACLE_FUNC(svsli,_n_u32,,)(op1, op2, 0); +} + +svuint32_t test_svsli_n_u32_1(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svsli_n_u32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sli.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 31) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_u32'}} + return SVE_ACLE_FUNC(svsli,_n_u32,,)(op1, op2, 31); +} + +svuint64_t 
test_svsli_n_u64(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svsli_n_u64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sli.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 0) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_u64'}} + return SVE_ACLE_FUNC(svsli,_n_u64,,)(op1, op2, 0); +} + +svuint64_t test_svsli_n_u64_1(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svsli_n_u64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sli.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 63) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsli'}} + // expected-warning@+1 {{implicit declaration of function 'svsli_n_u64'}} + return SVE_ACLE_FUNC(svsli,_n_u64,,)(op1, op2, 63); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sra.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sra.c new file mode 100644 index 000000000000..d6535a463430 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sra.c @@ -0,0 +1,173 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svsra_n_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svsra_n_s8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ssra.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_s8'}} + return SVE_ACLE_FUNC(svsra,_n_s8,,)(op1, op2, 1); +} + +svint8_t test_svsra_n_s8_1(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svsra_n_s8_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ssra.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_s8'}} + return SVE_ACLE_FUNC(svsra,_n_s8,,)(op1, op2, 8); +} + +svint16_t test_svsra_n_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svsra_n_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssra.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_s16'}} + return SVE_ACLE_FUNC(svsra,_n_s16,,)(op1, op2, 1); +} + +svint16_t test_svsra_n_s16_1(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svsra_n_s16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssra.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_s16'}} + return SVE_ACLE_FUNC(svsra,_n_s16,,)(op1, op2, 16); +} + +svint32_t test_svsra_n_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svsra_n_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssra.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_s32'}} + return SVE_ACLE_FUNC(svsra,_n_s32,,)(op1, op2, 1); +} + +svint32_t test_svsra_n_s32_1(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svsra_n_s32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssra.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_s32'}} + return SVE_ACLE_FUNC(svsra,_n_s32,,)(op1, op2, 32); +} + +svint64_t test_svsra_n_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svsra_n_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssra.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_s64'}} + return 
SVE_ACLE_FUNC(svsra,_n_s64,,)(op1, op2, 1); +} + +svint64_t test_svsra_n_s64_1(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svsra_n_s64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssra.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_s64'}} + return SVE_ACLE_FUNC(svsra,_n_s64,,)(op1, op2, 64); +} + +svuint8_t test_svsra_n_u8(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svsra_n_u8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.usra.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_u8'}} + return SVE_ACLE_FUNC(svsra,_n_u8,,)(op1, op2, 1); +} + +svuint8_t test_svsra_n_u8_1(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svsra_n_u8_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.usra.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_u8'}} + return SVE_ACLE_FUNC(svsra,_n_u8,,)(op1, op2, 8); +} + +svuint16_t test_svsra_n_u16(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svsra_n_u16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usra.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_u16'}} + return SVE_ACLE_FUNC(svsra,_n_u16,,)(op1, op2, 1); +} + +svuint16_t test_svsra_n_u16_1(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svsra_n_u16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usra.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_u16'}} + return SVE_ACLE_FUNC(svsra,_n_u16,,)(op1, op2, 16); +} + +svuint32_t test_svsra_n_u32(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svsra_n_u32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usra.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_u32'}} + return SVE_ACLE_FUNC(svsra,_n_u32,,)(op1, op2, 1); +} + +svuint32_t test_svsra_n_u32_1(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svsra_n_u32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usra.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_u32'}} + return SVE_ACLE_FUNC(svsra,_n_u32,,)(op1, op2, 32); +} + +svuint64_t 
test_svsra_n_u64(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svsra_n_u64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usra.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_u64'}} + return SVE_ACLE_FUNC(svsra,_n_u64,,)(op1, op2, 1); +} + +svuint64_t test_svsra_n_u64_1(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svsra_n_u64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usra.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsra'}} + // expected-warning@+1 {{implicit declaration of function 'svsra_n_u64'}} + return SVE_ACLE_FUNC(svsra,_n_u64,,)(op1, op2, 64); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sri.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sri.c new file mode 100644 index 000000000000..7abb04d291c4 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sri.c @@ -0,0 +1,173 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svsri_n_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svsri_n_s8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sri.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_s8'}} + return SVE_ACLE_FUNC(svsri,_n_s8,,)(op1, op2, 1); +} + +svint8_t test_svsri_n_s8_1(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svsri_n_s8_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sri.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_s8'}} + return SVE_ACLE_FUNC(svsri,_n_s8,,)(op1, op2, 8); +} + +svint16_t test_svsri_n_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svsri_n_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sri.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_s16'}} + return SVE_ACLE_FUNC(svsri,_n_s16,,)(op1, op2, 1); +} + +svint16_t test_svsri_n_s16_1(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svsri_n_s16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sri.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_s16'}} + return SVE_ACLE_FUNC(svsri,_n_s16,,)(op1, op2, 16); +} + +svint32_t test_svsri_n_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svsri_n_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sri.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_s32'}} + return SVE_ACLE_FUNC(svsri,_n_s32,,)(op1, op2, 1); +} + +svint32_t test_svsri_n_s32_1(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svsri_n_s32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sri.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_s32'}} + return SVE_ACLE_FUNC(svsri,_n_s32,,)(op1, op2, 32); +} + +svint64_t test_svsri_n_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svsri_n_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sri.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_s64'}} + return 
SVE_ACLE_FUNC(svsri,_n_s64,,)(op1, op2, 1); +} + +svint64_t test_svsri_n_s64_1(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svsri_n_s64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sri.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_s64'}} + return SVE_ACLE_FUNC(svsri,_n_s64,,)(op1, op2, 64); +} + +svuint8_t test_svsri_n_u8(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svsri_n_u8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sri.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 1) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_u8'}} + return SVE_ACLE_FUNC(svsri,_n_u8,,)(op1, op2, 1); +} + +svuint8_t test_svsri_n_u8_1(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svsri_n_u8_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sri.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 8) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_u8'}} + return SVE_ACLE_FUNC(svsri,_n_u8,,)(op1, op2, 8); +} + +svuint16_t test_svsri_n_u16(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svsri_n_u16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sri.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 1) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_u16'}} + return SVE_ACLE_FUNC(svsri,_n_u16,,)(op1, op2, 1); +} + +svuint16_t test_svsri_n_u16_1(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svsri_n_u16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sri.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 16) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_u16'}} + return SVE_ACLE_FUNC(svsri,_n_u16,,)(op1, op2, 16); +} + +svuint32_t test_svsri_n_u32(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svsri_n_u32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sri.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 1) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_u32'}} + return SVE_ACLE_FUNC(svsri,_n_u32,,)(op1, op2, 1); +} + +svuint32_t test_svsri_n_u32_1(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svsri_n_u32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sri.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 32) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_u32'}} + return SVE_ACLE_FUNC(svsri,_n_u32,,)(op1, op2, 32); +} + +svuint64_t 
test_svsri_n_u64(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svsri_n_u64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sri.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_u64'}} + return SVE_ACLE_FUNC(svsri,_n_u64,,)(op1, op2, 1); +} + +svuint64_t test_svsri_n_u64_1(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svsri_n_u64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sri.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 64) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svsri'}} + // expected-warning@+1 {{implicit declaration of function 'svsri_n_u64'}} + return SVE_ACLE_FUNC(svsri,_n_u64,,)(op1, op2, 64); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qshlu.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qshlu.c index 022cf804b007..6b7e5a5f6937 100644 --- a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qshlu.c +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qshlu.c @@ -10,6 +10,30 @@ #include <arm_sve.h> +svuint8_t test_svqshlu_n_s8_z(svbool_t pg, svint8_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} + return SVE_ACLE_FUNC(svqshlu,_n_s8,_z,)(pg, op1, -1); +} + +svuint16_t test_svqshlu_n_s16_z(svbool_t pg, svint16_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 15]}} + return SVE_ACLE_FUNC(svqshlu,_n_s16,_z,)(pg, op1, -1); +} + +svuint32_t test_svqshlu_n_s32_z(svbool_t pg, svint32_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 31]}} + return SVE_ACLE_FUNC(svqshlu,_n_s32,_z,)(pg, op1, -1); +} + +svuint64_t test_svqshlu_n_s64_z(svbool_t pg, svint64_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 63]}} + return SVE_ACLE_FUNC(svqshlu,_n_s64,_z,)(pg, op1, -1); +} + svuint8_t test_svqshlu_n_s8_m(svbool_t pg, svint8_t op1) { // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} @@ -33,3 +57,27 @@ svuint64_t test_svqshlu_n_s64_m(svbool_t pg, svint64_t op1) // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 63]}} return SVE_ACLE_FUNC(svqshlu,_n_s64,_m,)(pg, op1, -1); } + +svuint8_t test_svqshlu_n_s8_x(svbool_t pg, svint8_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} + return SVE_ACLE_FUNC(svqshlu,_n_s8,_x,)(pg, op1, -1); +} + +svuint16_t test_svqshlu_n_s16_x(svbool_t pg, svint16_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 15]}} + return SVE_ACLE_FUNC(svqshlu,_n_s16,_x,)(pg, op1, -1); +} + +svuint32_t test_svqshlu_n_s32_x(svbool_t pg, svint32_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 31]}} + return SVE_ACLE_FUNC(svqshlu,_n_s32,_x,)(pg, op1, -1); +} + +svuint64_t test_svqshlu_n_s64_x(svbool_t pg, svint64_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 63]}} + return SVE_ACLE_FUNC(svqshlu,_n_s64,_x,)(pg, op1, -1); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_rshr.c 
b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_rshr.c new file mode 100644 index 000000000000..2f7513db3a34 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_rshr.c @@ -0,0 +1,179 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +#include <arm_sve.h> + +svint8_t test_svrshr_n_s8_z(svbool_t pg, svint8_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svrshr,_n_s8,_z,)(pg, op1, 0); +} + +svint8_t test_svrshr_n_s8_z_1(svbool_t pg, svint8_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svrshr,_n_s8,_z,)(pg, op1, 9); +} + +svint16_t test_svrshr_n_s16_z(svbool_t pg, svint16_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svrshr,_n_s16,_z,)(pg, op1, 0); +} + +svint16_t test_svrshr_n_s16_z_1(svbool_t pg, svint16_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svrshr,_n_s16,_z,)(pg, op1, 17); +} + +svint32_t test_svrshr_n_s32_z(svbool_t pg, svint32_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svrshr,_n_s32,_z,)(pg, op1, 0); +} + +svint32_t test_svrshr_n_s32_z_1(svbool_t pg, svint32_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svrshr,_n_s32,_z,)(pg, op1, 33); +} + +svint64_t test_svrshr_n_s64_z(svbool_t pg, svint64_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svrshr,_n_s64,_z,)(pg, op1, 0); +} + +svint64_t test_svrshr_n_s64_z_1(svbool_t pg, svint64_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svrshr,_n_s64,_z,)(pg, op1, 65); +} + +svuint8_t test_svrshr_n_u8_z(svbool_t pg, svuint8_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svrshr,_n_u8,_z,)(pg, op1, 0); +} + +svuint16_t test_svrshr_n_u16_z(svbool_t pg, svuint16_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svrshr,_n_u16,_z,)(pg, op1, 0); +} + +svuint32_t test_svrshr_n_u32_z(svbool_t pg, svuint32_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svrshr,_n_u32,_z,)(pg, op1, 0); +} + +svuint64_t test_svrshr_n_u64_z(svbool_t pg, svuint64_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svrshr,_n_u64,_z,)(pg, op1, 0); +} + +svint8_t test_svrshr_n_s8_m(svbool_t pg, svint8_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid 
range [1, 8]}} + return SVE_ACLE_FUNC(svrshr,_n_s8,_m,)(pg, op1, 0); +} + +svint16_t test_svrshr_n_s16_m(svbool_t pg, svint16_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svrshr,_n_s16,_m,)(pg, op1, 0); +} + +svint32_t test_svrshr_n_s32_m(svbool_t pg, svint32_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svrshr,_n_s32,_m,)(pg, op1, 0); +} + +svint64_t test_svrshr_n_s64_m(svbool_t pg, svint64_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svrshr,_n_s64,_m,)(pg, op1, 0); +} + +svuint8_t test_svrshr_n_u8_m(svbool_t pg, svuint8_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svrshr,_n_u8,_m,)(pg, op1, 0); +} + +svuint16_t test_svrshr_n_u16_m(svbool_t pg, svuint16_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svrshr,_n_u16,_m,)(pg, op1, 0); +} + +svuint32_t test_svrshr_n_u32_m(svbool_t pg, svuint32_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svrshr,_n_u32,_m,)(pg, op1, 0); +} + +svuint64_t test_svrshr_n_u64_m(svbool_t pg, svuint64_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svrshr,_n_u64,_m,)(pg, op1, 0); +} + +svint8_t test_svrshr_n_s8_x(svbool_t pg, svint8_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svrshr,_n_s8,_x,)(pg, op1, 0); +} + +svint16_t test_svrshr_n_s16_x(svbool_t pg, svint16_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svrshr,_n_s16,_x,)(pg, op1, 0); +} + +svint32_t test_svrshr_n_s32_x(svbool_t pg, svint32_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svrshr,_n_s32,_x,)(pg, op1, 0); +} + +svint64_t test_svrshr_n_s64_x(svbool_t pg, svint64_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svrshr,_n_s64,_x,)(pg, op1, 0); +} + +svuint8_t test_svrshr_n_u8_x(svbool_t pg, svuint8_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svrshr,_n_u8,_x,)(pg, op1, 0); +} + +svuint16_t test_svrshr_n_u16_x(svbool_t pg, svuint16_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svrshr,_n_u16,_x,)(pg, op1, 0); +} + +svuint32_t test_svrshr_n_u32_x(svbool_t pg, svuint32_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svrshr,_n_u32,_x,)(pg, op1, 0); +} + +svuint64_t test_svrshr_n_u64_x(svbool_t pg, svuint64_t op1) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svrshr,_n_u64,_x,)(pg, op1, 0); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_rsra.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_rsra.c new file mode 100644 index 000000000000..e279a384e01d --- /dev/null +++ 
b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_rsra.c @@ -0,0 +1,59 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +#include <arm_sve.h> + +svint8_t test_svrsra_n_s8(svint8_t op1, svint8_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svrsra,_n_s8,,)(op1, op2, 0); +} + +svint16_t test_svrsra_n_s16(svint16_t op1, svint16_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svrsra,_n_s16,,)(op1, op2, 0); +} + +svint32_t test_svrsra_n_s32(svint32_t op1, svint32_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svrsra,_n_s32,,)(op1, op2, 0); +} + +svint64_t test_svrsra_n_s64(svint64_t op1, svint64_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svrsra,_n_s64,,)(op1, op2, 0); +} + +svuint8_t test_svrsra_n_u8(svuint8_t op1, svuint8_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svrsra,_n_u8,,)(op1, op2, 0); +} + +svuint16_t test_svrsra_n_u16(svuint16_t op1, svuint16_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svrsra,_n_u16,,)(op1, op2, 0); +} + +svuint32_t test_svrsra_n_u32(svuint32_t op1, svuint32_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svrsra,_n_u32,,)(op1, op2, 0); +} + +svuint64_t test_svrsra_n_u64(svuint64_t op1, svuint64_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svrsra,_n_u64,,)(op1, op2, 0); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sli.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sli.c new file mode 100644 index 000000000000..f8846952dd4a --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sli.c @@ -0,0 +1,59 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +#include <arm_sve.h> + +svint8_t test_svsli_n_s8(svint8_t op1, svint8_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} + return SVE_ACLE_FUNC(svsli,_n_s8,,)(op1, op2, -1); +} + +svint16_t test_svsli_n_s16(svint16_t op1, svint16_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 15]}} + return SVE_ACLE_FUNC(svsli,_n_s16,,)(op1, op2, -1); +} + +svint32_t test_svsli_n_s32(svint32_t op1, svint32_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 31]}} + return SVE_ACLE_FUNC(svsli,_n_s32,,)(op1, op2, -1); +} + +svint64_t test_svsli_n_s64(svint64_t op1, svint64_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 63]}} + return SVE_ACLE_FUNC(svsli,_n_s64,,)(op1, op2, -1); +} + +svuint8_t test_svsli_n_u8(svuint8_t op1, svuint8_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} + return SVE_ACLE_FUNC(svsli,_n_u8,,)(op1, op2, -1); +} + +svuint16_t test_svsli_n_u16(svuint16_t op1, svuint16_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 15]}} + return SVE_ACLE_FUNC(svsli,_n_u16,,)(op1, op2, -1); +} + +svuint32_t test_svsli_n_u32(svuint32_t op1, svuint32_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 31]}} + return SVE_ACLE_FUNC(svsli,_n_u32,,)(op1, op2, -1); +} + +svuint64_t test_svsli_n_u64(svuint64_t op1, svuint64_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 63]}} + return SVE_ACLE_FUNC(svsli,_n_u64,,)(op1, op2, -1); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sra.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sra.c new file mode 100644 index 000000000000..efd4a70c37ec --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sra.c @@ -0,0 +1,59 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +#include <arm_sve.h> + +svint8_t test_svsra_n_s8(svint8_t op1, svint8_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svsra,_n_s8,,)(op1, op2, 0); +} + +svint16_t test_svsra_n_s16(svint16_t op1, svint16_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svsra,_n_s16,,)(op1, op2, 0); +} + +svint32_t test_svsra_n_s32(svint32_t op1, svint32_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svsra,_n_s32,,)(op1, op2, 0); +} + +svint64_t test_svsra_n_s64(svint64_t op1, svint64_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svsra,_n_s64,,)(op1, op2, 0); +} + +svuint8_t test_svsra_n_u8(svuint8_t op1, svuint8_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svsra,_n_u8,,)(op1, op2, 0); +} + +svuint16_t test_svsra_n_u16(svuint16_t op1, svuint16_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svsra,_n_u16,,)(op1, op2, 0); +} + +svuint32_t test_svsra_n_u32(svuint32_t op1, svuint32_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svsra,_n_u32,,)(op1, op2, 0); +} + +svuint64_t test_svsra_n_u64(svuint64_t op1, svuint64_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svsra,_n_u64,,)(op1, op2, 0); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sri.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sri.c new file mode 100644 index 000000000000..48aea5c12427 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_sri.c @@ -0,0 +1,107 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +#include <arm_sve.h> + +svint8_t test_svsri_n_s8(svint8_t op1, svint8_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svsri,_n_s8,,)(op1, op2, 0); +} + +svint16_t test_svsri_n_s16(svint16_t op1, svint16_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svsri,_n_s16,,)(op1, op2, 0); +} + +svint32_t test_svsri_n_s32(svint32_t op1, svint32_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svsri,_n_s32,,)(op1, op2, 0); +} + +svint64_t test_svsri_n_s64(svint64_t op1, svint64_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svsri,_n_s64,,)(op1, op2, 0); +} + +svuint8_t test_svsri_n_u8(svuint8_t op1, svuint8_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svsri,_n_u8,,)(op1, op2, 0); +} + +svuint16_t test_svsri_n_u16(svuint16_t op1, svuint16_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svsri,_n_u16,,)(op1, op2, 0); +} + +svuint32_t test_svsri_n_u32(svuint32_t op1, svuint32_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svsri,_n_u32,,)(op1, op2, 0); +} + +svuint64_t test_svsri_n_u64(svuint64_t op1, svuint64_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svsri,_n_u64,,)(op1, op2, 0); +} + +svint8_t test_svsri_n_s8_1(svint8_t op1, svint8_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svsri,_n_s8,,)(op1, op2, 9); +} + +svint16_t test_svsri_n_s16_1(svint16_t op1, svint16_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svsri,_n_s16,,)(op1, op2, 17); +} + +svint32_t test_svsri_n_s32_1(svint32_t op1, svint32_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svsri,_n_s32,,)(op1, op2, 33); +} + +svint64_t test_svsri_n_s64_1(svint64_t op1, svint64_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svsri,_n_s64,,)(op1, op2, 65); +} + +svuint8_t test_svsri_n_u8_1(svuint8_t op1, svuint8_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}} + return SVE_ACLE_FUNC(svsri,_n_u8,,)(op1, op2, 9); +} + +svuint16_t test_svsri_n_u16_1(svuint16_t op1, svuint16_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}} + return SVE_ACLE_FUNC(svsri,_n_u16,,)(op1, op2, 17); +} + +svuint32_t test_svsri_n_u32_1(svuint32_t op1, svuint32_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}} + return SVE_ACLE_FUNC(svsri,_n_u32,,)(op1, op2, 33); +} + +svuint64_t test_svsri_n_u64_1(svuint64_t op1, svuint64_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}} + return SVE_ACLE_FUNC(svsri,_n_u64,,)(op1, op2, 65); +} 
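For readers who want to see the new builtins outside of the test harness, here is a minimal usage sketch (not part of the patch): it accumulates a rounding right shift of one buffer into another using the overloaded svrsra form added above. The function name, buffer layout and shift amount are illustrative assumptions; it only needs <arm_sve.h> and a compiler invoked with SVE2 enabled, e.g. clang -O2 -march=armv8-a+sve2.

#include <arm_sve.h>
#include <stdint.h>

// Illustrative only: acc[i] += rounding_shift_right(src[i], 4), element-wise.
void rsra_accumulate(int32_t *acc, const int32_t *src, int64_t n) {
  for (int64_t i = 0; i < n; i += (int64_t)svcntw()) {
    svbool_t pg = svwhilelt_b32(i, n);   // predicate covering the loop tail
    svint32_t a = svld1(pg, acc + i);
    svint32_t s = svld1(pg, src + i);
    // The shift amount must be a compile-time constant in [1, 32] for the
    // _n_s32 form, exactly the range the negative tests above enforce.
    a = svrsra(a, s, 4);
    svst1(pg, acc + i, a);
  }
}

Note that svrsra, like svsra/svsli/svsri in this patch, takes no governing predicate, so the predicate here only controls the loads and the store.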