Author: Sander de Smalen
Date: 2020-05-07T13:31:46+01:00
New Revision: 0d22076531ce9f2757ae4c69e647f02e99394e05

URL: https://github.com/llvm/llvm-project/commit/0d22076531ce9f2757ae4c69e647f02e99394e05
DIFF: https://github.com/llvm/llvm-project/commit/0d22076531ce9f2757ae4c69e647f02e99394e05.diff

LOG: [SveEmitter] Add builtins for SVE2 uniform DSP operations

This patch adds builtins for:
- svaba
- svqdmulh, svqdmulh_lane
- svqrdmlah, svqrdmlah_lane
- svqrdmlsh, svqrdmlsh_lane
- svqrdmulh, svqrdmulh_lane

Added: 
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_aba.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmulh.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmlah.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmlsh.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmulh.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmulh.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmlah.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmlsh.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmulh.c

Modified: 
    clang/include/clang/Basic/arm_sve.td

Removed: 
    

################################################################################
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 858b0eecbcb1..594efc507221 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -1290,6 +1290,25 @@ defm SVRSHL_U : SInstZPZxZ<"svrshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_
 defm SVSQADD : SInstZPZxZ<"svsqadd", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_usqadd">;
 defm SVUQADD : SInstZPZxZ<"svuqadd", "csil", "dPdu", "dPdL", "aarch64_sve_suqadd">;
+def SVABA_S        : SInst<"svaba[_{d}]",       "dddd", "csil"    , MergeNone, "aarch64_sve_saba">;
+def SVABA_U        : SInst<"svaba[_{d}]",       "dddd", "UcUsUiUl", MergeNone, "aarch64_sve_uaba">;
+def SVQDMULH       : SInst<"svqdmulh[_{d}]",    "ddd",  "csil",     MergeNone, "aarch64_sve_sqdmulh">;
+def SVQRDMULH      : SInst<"svqrdmulh[_{d}]",   "ddd",  "csil",     MergeNone, "aarch64_sve_sqrdmulh">;
+def SVQRDMLAH      : SInst<"svqrdmlah[_{d}]",   "dddd", "csil",     MergeNone, "aarch64_sve_sqrdmlah">;
+def SVQRDMLSH      : SInst<"svqrdmlsh[_{d}]",   "dddd", "csil",     MergeNone, "aarch64_sve_sqrdmlsh">;
+
+def SVABA_S_N      : SInst<"svaba[_n_{d}]",     "ddda", "csil",     MergeNone, "aarch64_sve_saba">;
+def SVABA_U_N      : SInst<"svaba[_n_{d}]",     "ddda", "UcUsUiUl", MergeNone, "aarch64_sve_uaba">;
+def SVQDMULH_N     : SInst<"svqdmulh[_n_{d}]",  "dda",  "csil",     MergeNone, "aarch64_sve_sqdmulh">;
+def SVQRDMULH_N    : SInst<"svqrdmulh[_n_{d}]", "dda",  "csil",     MergeNone, "aarch64_sve_sqrdmulh">;
+def SVQRDMLAH_N    : SInst<"svqrdmlah[_n_{d}]", "ddda", "csil",     MergeNone, "aarch64_sve_sqrdmlah">;
+def SVQRDMLSH_N    : SInst<"svqrdmlsh[_n_{d}]", "ddda", "csil",     MergeNone, "aarch64_sve_sqrdmlsh">;
+
+def SVQDMULH_LANE  : SInst<"svqdmulh_lane[_{d}]",  "dddi",  "sil", MergeNone, "aarch64_sve_sqdmulh_lane",  [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQRDMULH_LANE : SInst<"svqrdmulh_lane[_{d}]", "dddi",  "sil", MergeNone, "aarch64_sve_sqrdmulh_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQRDMLAH_LANE : SInst<"svqrdmlah_lane[_{d}]", "ddddi", "sil", MergeNone, "aarch64_sve_sqrdmlah_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQRDMLSH_LANE : SInst<"svqrdmlsh_lane[_{d}]", "ddddi", "sil", MergeNone, "aarch64_sve_sqrdmlsh_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+
 def SVQSHLU_M : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeOp1, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
 def SVQSHLU_X : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeAny, "aarch64_sve_sqshlu", [],
[ImmCheck<2, ImmCheckShiftLeft, 1>]>; def SVQSHLU_Z : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeZero, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>; diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_aba.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_aba.c new file mode 100644 index 000000000000..5ba165faf48f --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_aba.c @@ -0,0 +1,181 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svaba_s8(svint8_t op1, svint8_t op2, svint8_t op3) +{ + // CHECK-LABEL: test_svaba_s8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.saba.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_s8'}} + return SVE_ACLE_FUNC(svaba,_s8,,)(op1, op2, op3); +} + +svint16_t test_svaba_s16(svint16_t op1, svint16_t op2, svint16_t op3) +{ + // CHECK-LABEL: test_svaba_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saba.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_s16'}} + return SVE_ACLE_FUNC(svaba,_s16,,)(op1, op2, op3); +} + +svint32_t test_svaba_s32(svint32_t op1, svint32_t op2, svint32_t op3) +{ + // CHECK-LABEL: test_svaba_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saba.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_s32'}} + return SVE_ACLE_FUNC(svaba,_s32,,)(op1, op2, op3); +} + +svint64_t test_svaba_s64(svint64_t op1, svint64_t op2, svint64_t op3) +{ + // CHECK-LABEL: test_svaba_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saba.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 
'svaba_s64'}} + return SVE_ACLE_FUNC(svaba,_s64,,)(op1, op2, op3); +} + +svuint8_t test_svaba_u8(svuint8_t op1, svuint8_t op2, svuint8_t op3) +{ + // CHECK-LABEL: test_svaba_u8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uaba.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_u8'}} + return SVE_ACLE_FUNC(svaba,_u8,,)(op1, op2, op3); +} + +svuint16_t test_svaba_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3) +{ + // CHECK-LABEL: test_svaba_u16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaba.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_u16'}} + return SVE_ACLE_FUNC(svaba,_u16,,)(op1, op2, op3); +} + +svuint32_t test_svaba_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3) +{ + // CHECK-LABEL: test_svaba_u32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaba.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_u32'}} + return SVE_ACLE_FUNC(svaba,_u32,,)(op1, op2, op3); +} + +svuint64_t test_svaba_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3) +{ + // CHECK-LABEL: test_svaba_u64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaba.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_u64'}} + return SVE_ACLE_FUNC(svaba,_u64,,)(op1, op2, op3); +} + +svint8_t test_svaba_n_s8(svint8_t op1, svint8_t op2, int8_t op3) +{ + // CHECK-LABEL: test_svaba_n_s8 + // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.saba.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_n_s8'}} + return SVE_ACLE_FUNC(svaba,_n_s8,,)(op1, op2, op3); +} + +svint16_t test_svaba_n_s16(svint16_t op1, svint16_t op2, int16_t op3) +{ + // CHECK-LABEL: test_svaba_n_s16 + // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saba.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_n_s16'}} + return SVE_ACLE_FUNC(svaba,_n_s16,,)(op1, op2, op3); +} + +svint32_t test_svaba_n_s32(svint32_t op1, svint32_t op2, int32_t op3) +{ + // CHECK-LABEL: test_svaba_n_s32 + // CHECK: %[[DUP:.*]] = call <vscale x 4 x 
i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saba.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_n_s32'}} + return SVE_ACLE_FUNC(svaba,_n_s32,,)(op1, op2, op3); +} + +svint64_t test_svaba_n_s64(svint64_t op1, svint64_t op2, int64_t op3) +{ + // CHECK-LABEL: test_svaba_n_s64 + // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saba.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]]) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_n_s64'}} + return SVE_ACLE_FUNC(svaba,_n_s64,,)(op1, op2, op3); +} + +svuint8_t test_svaba_n_u8(svuint8_t op1, svuint8_t op2, uint8_t op3) +{ + // CHECK-LABEL: test_svaba_n_u8 + // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uaba.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_n_u8'}} + return SVE_ACLE_FUNC(svaba,_n_u8,,)(op1, op2, op3); +} + +svuint16_t test_svaba_n_u16(svuint16_t op1, svuint16_t op2, uint16_t op3) +{ + // CHECK-LABEL: test_svaba_n_u16 + // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaba.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_n_u16'}} + return SVE_ACLE_FUNC(svaba,_n_u16,,)(op1, op2, op3); +} + +svuint32_t test_svaba_n_u32(svuint32_t op1, svuint32_t op2, uint32_t op3) +{ + // CHECK-LABEL: test_svaba_n_u32 + // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaba.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_n_u32'}} + return SVE_ACLE_FUNC(svaba,_n_u32,,)(op1, op2, op3); +} + +svuint64_t test_svaba_n_u64(svuint64_t op1, svuint64_t op2, uint64_t op3) +{ + // CHECK-LABEL: test_svaba_n_u64 + // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaba.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]]) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svaba'}} + // expected-warning@+1 {{implicit declaration of function 'svaba_n_u64'}} + return 
SVE_ACLE_FUNC(svaba,_n_u64,,)(op1, op2, op3); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmulh.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmulh.c new file mode 100644 index 000000000000..0de2aad076a6 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmulh.c @@ -0,0 +1,157 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svqdmulh_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svqdmulh_s8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqdmulh.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_s8'}} + return SVE_ACLE_FUNC(svqdmulh,_s8,,)(op1, op2); +} + +svint16_t test_svqdmulh_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svqdmulh_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmulh.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_s16'}} + return SVE_ACLE_FUNC(svqdmulh,_s16,,)(op1, op2); +} + +svint32_t test_svqdmulh_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svqdmulh_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmulh.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_s32'}} + return SVE_ACLE_FUNC(svqdmulh,_s32,,)(op1, op2); +} + +svint64_t test_svqdmulh_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svqdmulh_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmulh.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_s64'}} + return SVE_ACLE_FUNC(svqdmulh,_s64,,)(op1, op2); +} + +svint8_t test_svqdmulh_n_s8(svint8_t op1, int8_t op2) +{ + // CHECK-LABEL: test_svqdmulh_n_s8 + // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqdmulh.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_n_s8'}} + return SVE_ACLE_FUNC(svqdmulh,_n_s8,,)(op1, op2); +} + +svint16_t test_svqdmulh_n_s16(svint16_t op1, int16_t op2) +{ + // CHECK-LABEL: test_svqdmulh_n_s16 + // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmulh.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_n_s16'}} + return SVE_ACLE_FUNC(svqdmulh,_n_s16,,)(op1, op2); +} + +svint32_t test_svqdmulh_n_s32(svint32_t op1, int32_t op2) +{ + // CHECK-LABEL: test_svqdmulh_n_s32 + // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmulh.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_n_s32'}} + return SVE_ACLE_FUNC(svqdmulh,_n_s32,,)(op1, op2); +} + +svint64_t test_svqdmulh_n_s64(svint64_t op1, int64_t op2) +{ + // CHECK-LABEL: test_svqdmulh_n_s64 + // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmulh.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]]) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_n_s64'}} + return SVE_ACLE_FUNC(svqdmulh,_n_s64,,)(op1, op2); +} + +svint16_t test_svqdmulh_lane_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svqdmulh_lane_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmulh.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_lane_s16'}} + return SVE_ACLE_FUNC(svqdmulh_lane,_s16,,)(op1, op2, 0); +} + +svint16_t test_svqdmulh_lane_s16_1(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svqdmulh_lane_s16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmulh.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_lane_s16'}} + return SVE_ACLE_FUNC(svqdmulh_lane,_s16,,)(op1, op2, 7); +} + +svint32_t test_svqdmulh_lane_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svqdmulh_lane_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmulh.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> 
%op2, i32 0) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_lane_s32'}} + return SVE_ACLE_FUNC(svqdmulh_lane,_s32,,)(op1, op2, 0); +} + +svint32_t test_svqdmulh_lane_s32_1(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svqdmulh_lane_s32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmulh.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_lane_s32'}} + return SVE_ACLE_FUNC(svqdmulh_lane,_s32,,)(op1, op2, 3); +} + +svint64_t test_svqdmulh_lane_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svqdmulh_lane_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmulh.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 0) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_lane_s64'}} + return SVE_ACLE_FUNC(svqdmulh_lane,_s64,,)(op1, op2, 0); +} + +svint64_t test_svqdmulh_lane_s64_1(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svqdmulh_lane_s64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmulh.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqdmulh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqdmulh_lane_s64'}} + return SVE_ACLE_FUNC(svqdmulh_lane,_s64,,)(op1, op2, 1); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmlah.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmlah.c new file mode 100644 index 000000000000..a142609407a6 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmlah.c @@ -0,0 +1,157 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svqrdmlah_s8(svint8_t op1, svint8_t op2, svint8_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_s8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrdmlah.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_s8'}} + return SVE_ACLE_FUNC(svqrdmlah,_s8,,)(op1, op2, op3); +} + +svint16_t test_svqrdmlah_s16(svint16_t op1, svint16_t op2, svint16_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmlah.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_s16'}} + return SVE_ACLE_FUNC(svqrdmlah,_s16,,)(op1, op2, op3); +} + +svint32_t test_svqrdmlah_s32(svint32_t op1, svint32_t op2, svint32_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmlah.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_s32'}} + return SVE_ACLE_FUNC(svqrdmlah,_s32,,)(op1, op2, op3); +} + +svint64_t test_svqrdmlah_s64(svint64_t op1, svint64_t op2, svint64_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmlah.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_s64'}} + return SVE_ACLE_FUNC(svqrdmlah,_s64,,)(op1, op2, op3); +} + +svint8_t test_svqrdmlah_n_s8(svint8_t op1, svint8_t op2, int8_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_n_s8 + // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrdmlah.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_n_s8'}} + return SVE_ACLE_FUNC(svqrdmlah,_n_s8,,)(op1, op2, op3); +} + +svint16_t test_svqrdmlah_n_s16(svint16_t op1, svint16_t op2, int16_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_n_s16 + // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmlah.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_n_s16'}} + return 
SVE_ACLE_FUNC(svqrdmlah,_n_s16,,)(op1, op2, op3); +} + +svint32_t test_svqrdmlah_n_s32(svint32_t op1, svint32_t op2, int32_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_n_s32 + // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmlah.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_n_s32'}} + return SVE_ACLE_FUNC(svqrdmlah,_n_s32,,)(op1, op2, op3); +} + +svint64_t test_svqrdmlah_n_s64(svint64_t op1, svint64_t op2, int64_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_n_s64 + // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmlah.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]]) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_n_s64'}} + return SVE_ACLE_FUNC(svqrdmlah,_n_s64,,)(op1, op2, op3); +} + +svint16_t test_svqrdmlah_lane_s16(svint16_t op1, svint16_t op2, svint16_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_lane_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmlah.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_lane_s16'}} + return SVE_ACLE_FUNC(svqrdmlah_lane,_s16,,)(op1, op2, op3, 0); +} + +svint16_t test_svqrdmlah_lane_s16_1(svint16_t op1, svint16_t op2, svint16_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_lane_s16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmlah.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_lane_s16'}} + return SVE_ACLE_FUNC(svqrdmlah_lane,_s16,,)(op1, op2, op3, 7); +} + +svint32_t test_svqrdmlah_lane_s32(svint32_t op1, svint32_t op2, svint32_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_lane_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmlah.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_lane_s32'}} + return SVE_ACLE_FUNC(svqrdmlah_lane,_s32,,)(op1, op2, op3, 0); +} + +svint32_t test_svqrdmlah_lane_s32_1(svint32_t op1, svint32_t op2, svint32_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_lane_s32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmlah.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah_lane'}} + // 
expected-warning@+1 {{implicit declaration of function 'svqrdmlah_lane_s32'}} + return SVE_ACLE_FUNC(svqrdmlah_lane,_s32,,)(op1, op2, op3, 3); +} + +svint64_t test_svqrdmlah_lane_s64(svint64_t op1, svint64_t op2, svint64_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_lane_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmlah.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 0) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_lane_s64'}} + return SVE_ACLE_FUNC(svqrdmlah_lane,_s64,,)(op1, op2, op3, 0); +} + +svint64_t test_svqrdmlah_lane_s64_1(svint64_t op1, svint64_t op2, svint64_t op3) +{ + // CHECK-LABEL: test_svqrdmlah_lane_s64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmlah.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlah_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlah_lane_s64'}} + return SVE_ACLE_FUNC(svqrdmlah_lane,_s64,,)(op1, op2, op3, 1); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmlsh.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmlsh.c new file mode 100644 index 000000000000..921bca4c7669 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmlsh.c @@ -0,0 +1,157 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svqrdmlsh_s8(svint8_t op1, svint8_t op2, svint8_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_s8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrdmlsh.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_s8'}} + return SVE_ACLE_FUNC(svqrdmlsh,_s8,,)(op1, op2, op3); +} + +svint16_t test_svqrdmlsh_s16(svint16_t op1, svint16_t op2, svint16_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmlsh.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_s16'}} + return SVE_ACLE_FUNC(svqrdmlsh,_s16,,)(op1, op2, op3); +} + +svint32_t test_svqrdmlsh_s32(svint32_t op1, svint32_t op2, svint32_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmlsh.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_s32'}} + return SVE_ACLE_FUNC(svqrdmlsh,_s32,,)(op1, op2, op3); +} + +svint64_t test_svqrdmlsh_s64(svint64_t op1, svint64_t op2, svint64_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmlsh.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_s64'}} + return SVE_ACLE_FUNC(svqrdmlsh,_s64,,)(op1, op2, op3); +} + +svint8_t test_svqrdmlsh_n_s8(svint8_t op1, svint8_t op2, int8_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_n_s8 + // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrdmlsh.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_n_s8'}} + return SVE_ACLE_FUNC(svqrdmlsh,_n_s8,,)(op1, op2, op3); +} + +svint16_t test_svqrdmlsh_n_s16(svint16_t op1, svint16_t op2, int16_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_n_s16 + // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmlsh.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_n_s16'}} + return 
SVE_ACLE_FUNC(svqrdmlsh,_n_s16,,)(op1, op2, op3); +} + +svint32_t test_svqrdmlsh_n_s32(svint32_t op1, svint32_t op2, int32_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_n_s32 + // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmlsh.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_n_s32'}} + return SVE_ACLE_FUNC(svqrdmlsh,_n_s32,,)(op1, op2, op3); +} + +svint64_t test_svqrdmlsh_n_s64(svint64_t op1, svint64_t op2, int64_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_n_s64 + // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmlsh.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]]) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_n_s64'}} + return SVE_ACLE_FUNC(svqrdmlsh,_n_s64,,)(op1, op2, op3); +} + +svint16_t test_svqrdmlsh_lane_s16(svint16_t op1, svint16_t op2, svint16_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_lane_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmlsh.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_lane_s16'}} + return SVE_ACLE_FUNC(svqrdmlsh_lane,_s16,,)(op1, op2, op3, 0); +} + +svint16_t test_svqrdmlsh_lane_s16_1(svint16_t op1, svint16_t op2, svint16_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_lane_s16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmlsh.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_lane_s16'}} + return SVE_ACLE_FUNC(svqrdmlsh_lane,_s16,,)(op1, op2, op3, 7); +} + +svint32_t test_svqrdmlsh_lane_s32(svint32_t op1, svint32_t op2, svint32_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_lane_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmlsh.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_lane_s32'}} + return SVE_ACLE_FUNC(svqrdmlsh_lane,_s32,,)(op1, op2, op3, 0); +} + +svint32_t test_svqrdmlsh_lane_s32_1(svint32_t op1, svint32_t op2, svint32_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_lane_s32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmlsh.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh_lane'}} + // 
expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_lane_s32'}} + return SVE_ACLE_FUNC(svqrdmlsh_lane,_s32,,)(op1, op2, op3, 3); +} + +svint64_t test_svqrdmlsh_lane_s64(svint64_t op1, svint64_t op2, svint64_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_lane_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmlsh.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 0) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_lane_s64'}} + return SVE_ACLE_FUNC(svqrdmlsh_lane,_s64,,)(op1, op2, op3, 0); +} + +svint64_t test_svqrdmlsh_lane_s64_1(svint64_t op1, svint64_t op2, svint64_t op3) +{ + // CHECK-LABEL: test_svqrdmlsh_lane_s64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmlsh.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmlsh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmlsh_lane_s64'}} + return SVE_ACLE_FUNC(svqrdmlsh_lane,_s64,,)(op1, op2, op3, 1); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmulh.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmulh.c new file mode 100644 index 000000000000..07efb8ff0f8c --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qrdmulh.c @@ -0,0 +1,157 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svqrdmulh_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_s8 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrdmulh.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_s8'}} + return SVE_ACLE_FUNC(svqrdmulh,_s8,,)(op1, op2); +} + +svint16_t test_svqrdmulh_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmulh.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_s16'}} + return SVE_ACLE_FUNC(svqrdmulh,_s16,,)(op1, op2); +} + +svint32_t test_svqrdmulh_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmulh.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_s32'}} + return SVE_ACLE_FUNC(svqrdmulh,_s32,,)(op1, op2); +} + +svint64_t test_svqrdmulh_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmulh.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_s64'}} + return SVE_ACLE_FUNC(svqrdmulh,_s64,,)(op1, op2); +} + +svint8_t test_svqrdmulh_n_s8(svint8_t op1, int8_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_n_s8 + // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrdmulh.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) + // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_n_s8'}} + return SVE_ACLE_FUNC(svqrdmulh,_n_s8,,)(op1, op2); +} + +svint16_t test_svqrdmulh_n_s16(svint16_t op1, int16_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_n_s16 + // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmulh.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_n_s16'}} + return SVE_ACLE_FUNC(svqrdmulh,_n_s16,,)(op1, op2); +} + +svint32_t test_svqrdmulh_n_s32(svint32_t op1, int32_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_n_s32 + // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 
4 x i32> @llvm.aarch64.sve.sqrdmulh.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_n_s32'}} + return SVE_ACLE_FUNC(svqrdmulh,_n_s32,,)(op1, op2); +} + +svint64_t test_svqrdmulh_n_s64(svint64_t op1, int64_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_n_s64 + // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2) + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmulh.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]]) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_n_s64'}} + return SVE_ACLE_FUNC(svqrdmulh,_n_s64,,)(op1, op2); +} + +svint16_t test_svqrdmulh_lane_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_lane_s16 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmulh.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_lane_s16'}} + return SVE_ACLE_FUNC(svqrdmulh_lane,_s16,,)(op1, op2, 0); +} + +svint16_t test_svqrdmulh_lane_s16_1(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_lane_s16_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrdmulh.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7) + // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_lane_s16'}} + return SVE_ACLE_FUNC(svqrdmulh_lane,_s16,,)(op1, op2, 7); +} + +svint32_t test_svqrdmulh_lane_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_lane_s32 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmulh.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_lane_s32'}} + return SVE_ACLE_FUNC(svqrdmulh_lane,_s32,,)(op1, op2, 0); +} + +svint32_t test_svqrdmulh_lane_s32_1(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_lane_s32_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrdmulh.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3) + // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_lane_s32'}} + return SVE_ACLE_FUNC(svqrdmulh_lane,_s32,,)(op1, op2, 3); +} + +svint64_t test_svqrdmulh_lane_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_lane_s64 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmulh.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 0) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh_lane'}} + // expected-warning@+1 
{{implicit declaration of function 'svqrdmulh_lane_s64'}} + return SVE_ACLE_FUNC(svqrdmulh_lane,_s64,,)(op1, op2, 0); +} + +svint64_t test_svqrdmulh_lane_s64_1(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svqrdmulh_lane_s64_1 + // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqrdmulh.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1) + // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] + // overload-warning@+2 {{implicit declaration of function 'svqrdmulh_lane'}} + // expected-warning@+1 {{implicit declaration of function 'svqrdmulh_lane_s64'}} + return SVE_ACLE_FUNC(svqrdmulh_lane,_s64,,)(op1, op2, 1); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmulh.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmulh.c new file mode 100644 index 000000000000..60207813d8a3 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmulh.c @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +#include <arm_sve.h> + +svint16_t test_svqdmulh_lane_s16(svint16_t op1, svint16_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} + return SVE_ACLE_FUNC(svqdmulh_lane,_s16,,)(op1, op2, -1); +} + +svint32_t test_svqdmulh_lane_s32(svint32_t op1, svint32_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} + return SVE_ACLE_FUNC(svqdmulh_lane,_s32,,)(op1, op2, 4); +} + +svint64_t test_svqdmulh_lane_s64(svint64_t op1, svint64_t op2) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} + return SVE_ACLE_FUNC(svqdmulh_lane,_s64,,)(op1, op2, 2); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmlah.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmlah.c new file mode 100644 index 000000000000..94a5f351d6e8 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmlah.c @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +#include <arm_sve.h> + +svint16_t test_svqrdmlah_lane_s16(svint16_t op1, svint16_t op2, svint16_t op3) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} + return SVE_ACLE_FUNC(svqrdmlah_lane,_s16,,)(op1, op2, op3, -1); +} + +svint32_t test_svqrdmlah_lane_s32(svint32_t op1, svint32_t op2, svint32_t op3) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} + return SVE_ACLE_FUNC(svqrdmlah_lane,_s32,,)(op1, op2, op3, 4); +} + +svint64_t test_svqrdmlah_lane_s64(svint64_t op1, svint64_t op2, svint64_t op3) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} + return SVE_ACLE_FUNC(svqrdmlah_lane,_s64,,)(op1, op2, op3, 2); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmlsh.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmlsh.c new file mode 100644 index 000000000000..5ca6e6e1917a --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmlsh.c @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +#include <arm_sve.h> + +svint16_t test_svqrdmlsh_lane_s16(svint16_t op1, svint16_t op2, svint16_t op3) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}} + return SVE_ACLE_FUNC(svqrdmlsh_lane,_s16,,)(op1, op2, op3, -1); +} + +svint32_t test_svqrdmlsh_lane_s32(svint32_t op1, svint32_t op2, svint32_t op3) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}} + return SVE_ACLE_FUNC(svqrdmlsh_lane,_s32,,)(op1, op2, op3, 4); +} + +svint64_t test_svqrdmlsh_lane_s64(svint64_t op1, svint64_t op2, svint64_t op3) +{ + // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}} + return SVE_ACLE_FUNC(svqrdmlsh_lane,_s64,,)(op1, op2, op3, 2); +} diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmulh.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmulh.c new file mode 100644 index 000000000000..ead75193692d --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qrdmulh.c @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint16_t test_svqrdmulh_lane_s16(svint16_t op1, svint16_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svqrdmulh_lane,_s16,,)(op1, op2, -1);
+}
+
+svint32_t test_svqrdmulh_lane_s32(svint32_t op1, svint32_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svqrdmulh_lane,_s32,,)(op1, op2, 4);
+}
+
+svint64_t test_svqrdmulh_lane_s64(svint64_t op1, svint64_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}}
+  return SVE_ACLE_FUNC(svqrdmulh_lane,_s64,,)(op1, op2, 2);
+}
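
A note on the SVE_ACLE_FUNC macro used throughout these tests: it token-pastes its
arguments so that the same test source exercises both spellings of each builtin.
The sketch below is illustrative only and not part of the commit; compiling it
assumes arm_sve.h from a toolchain with SVE2 enabled, and the names 'demo',
'op1', 'op2', 'op3' are made up.

  #include <arm_sve.h>

  #ifdef SVE_OVERLOADED_FORMS
  #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
  #else
  #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
  #endif

  svint16_t demo(svint16_t op1, svint16_t op2, svint16_t op3)
  {
    // Expands to svqrdmlah_lane(op1, op2, op3, 0) when SVE_OVERLOADED_FORMS is
    // defined (A1##A3 drops the type suffix), and to
    // svqrdmlah_lane_s16(op1, op2, op3, 0) otherwise (A1##A2##A3##A4).
    return SVE_ACLE_FUNC(svqrdmlah_lane,_s16,,)(op1, op2, op3, 0);
  }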
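
To close, a minimal usage sketch of the builtins this patch adds, seen from user
code (not part of the commit). It assumes a compile with SVE2 enabled, e.g. the
-target-feature +sve2 mode used in the RUN lines above, and arm_sve.h providing
the declarations this patch generates; the function and variable names are made
up. Lane indices must be integer constants within the per-element-size ranges
enforced by the negative tests: [0, 7] for 16-bit, [0, 3] for 32-bit and [0, 1]
for 64-bit elements.

  #include <arm_sve.h>

  svint16_t dsp_demo(svint16_t acc, svint16_t a, svint16_t b, int16_t k)
  {
    svint16_t m = svqdmulh_s16(a, b);    // saturating doubling multiply, high half
    svint16_t n = svqdmulh_n_s16(m, k);  // same operation with a splatted scalar operand
    svint16_t d = svaba_s16(acc, m, n);  // absolute difference accumulated into 'acc'
    // Saturating rounding doubling multiply-accumulate, using lane 3 of each
    // 128-bit segment of 'b'; for 16-bit elements the lane index must be a
    // constant in [0, 7] (see the negative tests above).
    return svqrdmlah_lane_s16(d, n, b, 3);
  }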