sdesmalen created this revision.
sdesmalen added reviewers: SjoerdMeijer, efriedma, ctetreau.
Herald added a subscriber: tschuett.
Herald added a project: clang.
sdesmalen added a parent revision: D78748: [SveEmitter] Add builtins for svld1rq.

- svdupq builtins that duplicate scalars to every quadword of a vector are
defined in terms of the `svld1rq` (load and replicate quadword) builtins.
- svdupq builtins that duplicate boolean values to fill a predicate vector
are defined in terms of `svcmpne`, comparing the replicated integer vector
against zero (see the sketches below).
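
For reference, here is a minimal sketch of the header code this patch
generates for the 32-bit element case, hand-expanded from the MAKE_DUPQ_4
macro in the diff below (illustrative rather than verbatim emitter output;
comments added here, and `__ai` expands to `static inline
__attribute__((__always_inline__, __nodebug__))`):

  __ai svint32_t svdupq_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) {
    // Spill the four lanes to a stack array, then replicate the first
    // 128-bit quadword across the whole scalable vector.
    int32_t x[] = { x0, x1, x2, x3 };
    return svld1rq_s32(svptrue_b32(), x);
  }

The predicate forms reuse the integer forms and compare each lane against
zero; for the same element count the patch emits (again, comments added):

  __ai svbool_t svdupq_b32(bool x0, bool x1, bool x2, bool x3) {
    // Materialize the booleans as a replicated integer vector, then turn
    // it into a predicate by comparing every lane against zero.
    svint32_t x = svdupq_s32(x0, x1, x2, x3);
    return svcmpne_wide(svptrue_b32(), x, 0);
  }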


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D78750

Files:
  clang/include/clang/Basic/arm_sve.td
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c
  clang/utils/TableGen/SveEmitter.cpp

Index: clang/utils/TableGen/SveEmitter.cpp
===================================================================
--- clang/utils/TableGen/SveEmitter.cpp
+++ clang/utils/TableGen/SveEmitter.cpp
@@ -1048,6 +1048,8 @@
   OS << "} sv_prfop;\n\n";
 
   OS << "/* Function attributes */\n";
+  OS << "#define __ai static inline __attribute__((__always_inline__, "
+        "__nodebug__))\n";
   OS << "#define __aio static inline __attribute__((__always_inline__, "
         "__nodebug__, __overloadable__))\n\n";
 
@@ -1087,6 +1089,102 @@
   if (!InGuard.empty())
     OS << "#endif  //" << InGuard << "\n";
 
+  OS << "#define MAKE_DUPQ_16(TYPE,SUFFIX,PTRUE) \\\n";
+  OS << " __ai sv##TYPE svdupq_##SUFFIX(TYPE x0, TYPE x1,\\\n";
+  OS << "                               TYPE x2, TYPE x3,\\\n";
+  OS << "                               TYPE x4, TYPE x5,\\\n";
+  OS << "                               TYPE x6, TYPE x7,\\\n";
+  OS << "                               TYPE x8, TYPE x9,\\\n";
+  OS << "                               TYPE x10, TYPE x11,\\\n";
+  OS << "                               TYPE x12, TYPE x13,\\\n";
+  OS << "                               TYPE x14, TYPE x15) {\\\n";
+  OS << "  TYPE x[] = { x0, x1, x2, x3, x4, x5, x6, x7,\\\n";
+  OS << "               x8, x9, x10, x11, x12, x13, x14, x15 };\\\n";
+  OS << "  return svld1rq_##SUFFIX(PTRUE, x);\\\n";
+  OS << "}\n";
+  OS << "MAKE_DUPQ_16(int8_t, s8, svptrue_b8())\n";
+  OS << "MAKE_DUPQ_16(uint8_t, u8, svptrue_b8())\n";
+  OS << "#define svdupq_n_s8 svdupq_s8\n";
+  OS << "#define svdupq_n_u8 svdupq_u8\n\n";
+
+  OS << "#define MAKE_DUPQ_8(TYPE,SUFFIX,PTRUE) \\\n";
+  OS << " __ai sv##TYPE svdupq_##SUFFIX(TYPE x0, TYPE x1,\\\n";
+  OS << "                               TYPE x2, TYPE x3,\\\n";
+  OS << "                               TYPE x4, TYPE x5,\\\n";
+  OS << "                               TYPE x6, TYPE x7) {\\\n";
+  OS << "  TYPE x[] = { x0, x1, x2, x3, x4, x5, x6, x7 };\\\n";
+  OS << "  return svld1rq_##SUFFIX(PTRUE, x);\\\n";
+  OS << "}\n";
+  OS << "MAKE_DUPQ_8(int16_t, s16, svptrue_b16())\n";
+  OS << "MAKE_DUPQ_8(uint16_t, u16, svptrue_b16())\n";
+  OS << "#define svdupq_n_s16 svdupq_s16\n";
+  OS << "#define svdupq_n_u16 svdupq_u16\n\n";
+
+  // float16_t cannot be a parameter type, so implement svdupq_f16 as a macro.
+  OS << "#define svdupq_f16(x0, x1, x2, x3, "
+        "x4, x5, x6, x7) __extension__ ({\\\n";
+  OS << "  float16_t x[] = { (x0), (x1), (x2), (x3), "
+        "(x4), (x5), (x6), (x7) };\\\n";
+  OS << "  svfloat16_t __ret = svld1rq_f16(svptrue_b16(), x);\\\n";
+  OS << "  __ret;\\\n";
+  OS << "})\n";
+  OS << "#define svdupq_n_f16 svdupq_f16\n\n";
+
+  OS << "#define MAKE_DUPQ_4(TYPE,SUFFIX,PTRUE) \\\n";
+  OS << " __ai sv##TYPE svdupq_##SUFFIX(TYPE x0, TYPE x1,\\\n";
+  OS << "                               TYPE x2, TYPE x3) {\\\n";
+  OS << "  TYPE x[] = { x0, x1, x2, x3 };\\\n";
+  OS << "  return svld1rq_##SUFFIX(PTRUE, x);\\\n";
+  OS << "}\n";
+  OS << "MAKE_DUPQ_4(int32_t, s32, svptrue_b32())\n";
+  OS << "MAKE_DUPQ_4(uint32_t, u32, svptrue_b32())\n";
+  OS << "MAKE_DUPQ_4(float32_t, f32, svptrue_b32())\n";
+  OS << "#define svdupq_n_s32 svdupq_s32\n";
+  OS << "#define svdupq_n_u32 svdupq_u32\n";
+  OS << "#define svdupq_n_f32 svdupq_f32\n\n";
+
+  OS << "#define MAKE_DUPQ_2(TYPE,SUFFIX,PTRUE) \\\n";
+  OS << " __ai sv##TYPE svdupq_##SUFFIX(TYPE x0, TYPE x1) {\\\n";
+  OS << "  TYPE x[] = { x0, x1 };\\\n";
+  OS << "  return svld1rq_##SUFFIX(PTRUE, x);\\\n";
+  OS << "}\n";
+  OS << "MAKE_DUPQ_2(int64_t, s64, svptrue_b64())\n";
+  OS << "MAKE_DUPQ_2(uint64_t, u64, svptrue_b64())\n";
+  OS << "MAKE_DUPQ_2(float64_t, f64, svptrue_b64())\n";
+  OS << "#define svdupq_n_s64 svdupq_s64\n";
+  OS << "#define svdupq_n_u64 svdupq_u64\n";
+  OS << "#define svdupq_n_f64 svdupq_f64\n\n";
+
+  OS << "__ai svbool_t svdupq_b8(bool x0,  bool x1,  bool x2,  bool x3,\n";
+  OS << "                        bool x4,  bool x5,  bool x6,  bool x7,\n";
+  OS << "                        bool x8,  bool x9,  bool x10, bool x11,\n";
+  OS << "                        bool x12, bool x13, bool x14, bool x15) {\n";
+  OS << "  svint8_t x = svdupq_s8(x0, x1, x2,  x3,  x4,  x5,  x6,  x7,\n";
+  OS << "                         x8, x9, x10, x11, x12, x13, x14, x15);\n";
+  OS << "  return svcmpne_wide(svptrue_b8(), x, 0);\n";
+  OS << "}\n\n";
+
+  OS << "__ai svbool_t svdupq_b16(bool x0, bool x1, bool x2, bool x3,\n";
+  OS << "                         bool x4, bool x5, bool x6, bool x7) {\n";
+  OS << "  svint16_t x = svdupq_s16(x0, x1, x2, x3, x4, x5, x6, x7);\n";
+  OS << "  return svcmpne_wide(svptrue_b16(), x, 0);\n";
+  OS << "}\n\n";
+
+  OS << "__ai svbool_t svdupq_b32(bool x0, bool x1, bool x2, bool x3) {\n";
+  OS << "  svint32_t x = svdupq_s32(x0, x1, x2, x3);\n";
+  OS << "  return svcmpne_wide(svptrue_b32(), x, 0);\n";
+  OS << "}\n\n";
+
+  OS << "__ai svbool_t svdupq_b64(bool x0, bool x1) {\n";
+  OS << "  svint64_t x = svdupq_s64(x0, x1);\n";
+  OS << "  return svcmpne(svptrue_b64(), x, 0);\n";
+  OS << "}\n\n";
+
+  OS << "#define svdupq_n_b8 svdupq_b8\n";
+  OS << "#define svdupq_n_b16 svdupq_b16\n";
+  OS << "#define svdupq_n_b32 svdupq_b32\n";
+  OS << "#define svdupq_n_b64 svdupq_b64\n\n";
+
   OS << "#if defined(__ARM_FEATURE_SVE2)\n";
   OS << "#define svcvtnt_f16_x      svcvtnt_f16_m\n";
   OS << "#define svcvtnt_f16_f32_x  svcvtnt_f16_f32_m\n";
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c
@@ -0,0 +1,367 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svdupq_lane_s8(svint8_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdupq_lane_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %data, i64 %index)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdupq_lane,_s8,,)(data, index);
+}
+
+svint16_t test_svdupq_lane_s16(svint16_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdupq_lane_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %data, i64 %index)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdupq_lane,_s16,,)(data, index);
+}
+
+svint32_t test_svdupq_lane_s32(svint32_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdupq_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %data, i64 %index)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdupq_lane,_s32,,)(data, index);
+}
+
+svint64_t test_svdupq_lane_s64(svint64_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdupq_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %data, i64 %index)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdupq_lane,_s64,,)(data, index);
+}
+
+svuint8_t test_svdupq_lane_u8(svuint8_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdupq_lane_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %data, i64 %index)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdupq_lane,_u8,,)(data, index);
+}
+
+svuint16_t test_svdupq_lane_u16(svuint16_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdupq_lane_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %data, i64 %index)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdupq_lane,_u16,,)(data, index);
+}
+
+svuint32_t test_svdupq_lane_u32(svuint32_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdupq_lane_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %data, i64 %index)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdupq_lane,_u32,,)(data, index);
+}
+
+svuint64_t test_svdupq_lane_u64(svuint64_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdupq_lane_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %data, i64 %index)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdupq_lane,_u64,,)(data, index);
+}
+
+svfloat16_t test_svdupq_lane_f16(svfloat16_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdupq_lane_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %data, i64 %index)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdupq_lane,_f16,,)(data, index);
+}
+
+svfloat32_t test_svdupq_lane_f32(svfloat32_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdupq_lane_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %data, i64 %index)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdupq_lane,_f32,,)(data, index);
+}
+
+svfloat64_t test_svdupq_lane_f64(svfloat64_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdupq_lane_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %data, i64 %index)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdupq_lane,_f64,,)(data, index);
+}
+
+svint8_t test_svdupq_n_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3,
+                          int8_t x4, int8_t x5, int8_t x6, int8_t x7,
+                          int8_t x8, int8_t x9, int8_t x10, int8_t x11,
+                          int8_t x12, int8_t x13, int8_t x14, int8_t x15)
+{
+  // CHECK-LABEL: test_svdupq_n_s8
+  // CHECK: %[[ALLOCA:.*]] = alloca [16 x i8]
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i8 %x0, i8* %[[BASE]]
+  // <assume other stores>
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 15
+  // CHECK: store i8 %x15, i8* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: %[[PTRUE:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %[[PTRUE]], i8* nonnull %[[BASE]])
+  // CHECK: ret <vscale x 16 x i8> %[[LOAD]]
+  return SVE_ACLE_FUNC(svdupq,_n,_s8,)(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+}
+
+svint16_t test_svdupq_n_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3,
+                            int16_t x4, int16_t x5, int16_t x6, int16_t x7)
+{
+  // CHECK-LABEL: test_svdupq_n_s16
+  // CHECK: %[[ALLOCA:.*]] = alloca [8 x i16]
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i16 %x0, i16* %[[BASE]]
+  // <assume other stores>
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 7
+  // CHECK: store i16 %x7, i16* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %{{.*}}, i16* nonnull %[[BASE]])
+  // CHECK: ret <vscale x 8 x i16> %[[LOAD]]
+  return SVE_ACLE_FUNC(svdupq,_n,_s16,)(x0, x1, x2, x3, x4, x5, x6, x7);
+}
+
+svint32_t test_svdupq_n_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3)
+{
+  // CHECK-LABEL: test_svdupq_n_s32
+  // CHECK: %[[ALLOCA:.*]] = alloca [4 x i32]
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i32 %x0, i32* %[[BASE]]
+  // <assume other stores>
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 3
+  // CHECK: store i32 %x3, i32* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %{{.*}}, i32* nonnull %[[BASE]])
+  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
+  return SVE_ACLE_FUNC(svdupq,_n,_s32,)(x0, x1, x2, x3);
+}
+
+svint64_t test_svdupq_n_s64(int64_t x0, int64_t x1)
+{
+  // CHECK-LABEL: test_svdupq_n_s64
+  // CHECK: %[[ALLOCA:.*]] = alloca [2 x i64]
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i64 %x0, i64* %[[BASE]]
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 1
+  // CHECK: store i64 %x1, i64* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %{{.*}}, i64* nonnull %[[BASE]])
+  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
+  return SVE_ACLE_FUNC(svdupq,_n,_s64,)(x0, x1);
+}
+
+svuint8_t test_svdupq_n_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3,
+                           uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7,
+                           uint8_t x8, uint8_t x9, uint8_t x10, uint8_t x11,
+                           uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15)
+{
+  // CHECK-LABEL: test_svdupq_n_u8
+  // CHECK: %[[ALLOCA:.*]] = alloca [16 x i8]
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i8 %x0, i8* %[[BASE]]
+  // <assume other stores>
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 15
+  // CHECK: store i8 %x15, i8* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: %[[PTRUE:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %[[PTRUE]], i8* nonnull %[[BASE]])
+  // CHECK: ret <vscale x 16 x i8> %[[LOAD]]
+  return SVE_ACLE_FUNC(svdupq,_n,_u8,)(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+}
+
+svuint16_t test_svdupq_n_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3,
+                             uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7)
+{
+  // CHECK-LABEL: test_svdupq_n_u16
+  // CHECK: %[[ALLOCA:.*]] = alloca [8 x i16]
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i16 %x0, i16* %[[BASE]]
+  // <assume other stores>
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 7
+  // CHECK: store i16 %x7, i16* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %{{.*}}, i16* nonnull %[[BASE]])
+  // CHECK: ret <vscale x 8 x i16> %[[LOAD]]
+  return SVE_ACLE_FUNC(svdupq,_n,_u16,)(x0, x1, x2, x3, x4, x5, x6, x7);
+}
+
+svuint32_t test_svdupq_n_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3)
+{
+  // CHECK-LABEL: test_svdupq_n_u32
+  // CHECK: %[[ALLOCA:.*]] = alloca [4 x i32]
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i32 %x0, i32* %[[BASE]]
+  // <assume other stores>
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 3
+  // CHECK: store i32 %x3, i32* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %{{.*}}, i32* nonnull %[[BASE]])
+  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
+  return SVE_ACLE_FUNC(svdupq,_n,_u32,)(x0, x1, x2, x3);
+}
+
+svuint64_t test_svdupq_n_u64(uint64_t x0, uint64_t x1)
+{
+  // CHECK-LABEL: test_svdupq_n_u64
+  // CHECK: %[[ALLOCA:.*]] = alloca [2 x i64]
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i64 %x0, i64* %[[BASE]]
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 1
+  // CHECK: store i64 %x1, i64* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %{{.*}}, i64* nonnull %[[BASE]])
+  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
+  return SVE_ACLE_FUNC(svdupq,_n,_u64,)(x0, x1);
+}
+
+svfloat16_t test_svdupq_n_f16(float16_t x0, float16_t x1, float16_t x2, float16_t x3,
+                              float16_t x4, float16_t x5, float16_t x6, float16_t x7)
+{
+  // CHECK-LABEL: test_svdupq_n_f16
+  // CHECK: %[[ALLOCA:.*]] = alloca [8 x half]
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [8 x half], [8 x half]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store half %x0, half* %[[BASE]]
+  // <assume other stores>
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [8 x half], [8 x half]* %[[ALLOCA]], i64 0, i64 7
+  // CHECK: store half %x7, half* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %{{.*}}, half* nonnull %[[BASE]])
+  // CHECK: ret <vscale x 8 x half> %[[LOAD]]
+  return SVE_ACLE_FUNC(svdupq,_n,_f16,)(x0, x1, x2, x3, x4, x5, x6, x7);
+}
+
+svfloat32_t test_svdupq_n_f32(float32_t x0, float32_t x1, float32_t x2, float32_t x3)
+{
+  // CHECK-LABEL: test_svdupq_n_f32
+  // CHECK: %[[ALLOCA:.*]] = alloca [4 x float]
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [4 x float], [4 x float]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store float %x0, float* %[[BASE]]
+  // <assume other stores>
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [4 x float], [4 x float]* %[[ALLOCA]], i64 0, i64 3
+  // CHECK: store float %x3, float* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %{{.*}}, float* nonnull %[[BASE]])
+  // CHECK: ret <vscale x 4 x float> %[[LOAD]]
+  return SVE_ACLE_FUNC(svdupq,_n,_f32,)(x0, x1, x2, x3);
+}
+
+svfloat64_t test_svdupq_n_f64(float64_t x0, float64_t x1)
+{
+  // CHECK-LABEL: test_svdupq_n_f64
+  // CHECK: %[[ALLOCA:.*]] = alloca [2 x double]
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [2 x double], [2 x double]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store double %x0, double* %[[BASE]]
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [2 x double], [2 x double]* %[[ALLOCA]], i64 0, i64 1
+  // CHECK: store double %x1, double* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %{{.*}}, double* nonnull %[[BASE]])
+  // CHECK: ret <vscale x 2 x double> %[[LOAD]]
+  return SVE_ACLE_FUNC(svdupq,_n,_f64,)(x0, x1);
+}
+
+svbool_t test_svdupq_n_b8(bool x0, bool x1, bool x2, bool x3,
+                          bool x4, bool x5, bool x6, bool x7,
+                          bool x8, bool x9, bool x10, bool x11,
+                          bool x12, bool x13, bool x14, bool x15)
+{
+  // CHECK-LABEL: test_svdupq_n_b8
+  // CHECK: %[[ALLOCA:.*]] = alloca [16 x i8]
+  // CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i8
+  // CHECK-DAG: %[[X15:.*]] = zext i1 %x15 to i8
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i8 %[[X0]], i8* %[[BASE]]
+  // <assume other stores>
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 15
+  // CHECK: store i8 %[[X15]], i8* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: %[[PTRUE:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %[[PTRUE]], i8* nonnull %[[BASE]])
+  // CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
+  // CHECK: %[[CMP:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %[[PTRUE]], <vscale x 16 x i8> %[[LOAD]], <vscale x 2 x i64> %[[ZERO]])
+  // CHECK: ret <vscale x 16 x i1> %[[CMP]]
+  return SVE_ACLE_FUNC(svdupq,_n,_b8,)(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+}
+
+svbool_t test_svdupq_n_b16(bool x0, bool x1, bool x2, bool x3,
+                           bool x4, bool x5, bool x6, bool x7)
+{
+  // CHECK-LABEL: test_svdupq_n_b16
+  // CHECK: %[[ALLOCA:.*]] = alloca [8 x i16]
+  // CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i16
+  // CHECK-DAG: %[[X7:.*]] = zext i1 %x7 to i16
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i16 %[[X0]], i16* %[[BASE]]
+  // <assume other stores>
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 7
+  // CHECK: store i16 %[[X7]], i16* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: %[[PTRUE:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %{{.*}}, i16* nonnull %[[BASE]])
+  // CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
+  // CHECK: %[[CMP:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %{{.*}}, <vscale x 8 x i16> %[[LOAD]], <vscale x 2 x i64> %[[ZERO]])
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[CMP]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svdupq,_n,_b16,)(x0, x1, x2, x3, x4, x5, x6, x7);
+}
+
+svbool_t test_svdupq_n_b32(bool x0, bool x1, bool x2, bool x3)
+{
+  // CHECK-LABEL: test_svdupq_n_b32
+  // CHECK: %[[ALLOCA:.*]] = alloca [4 x i32]
+  // CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i32
+  // CHECK-DAG: %[[X3:.*]] = zext i1 %x3 to i32
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i32 %[[X0]], i32* %[[BASE]]
+  // <assume other stores>
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 3
+  // CHECK: store i32 %[[X3]], i32* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: %[[PTRUE:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %{{.*}}, i32* nonnull %[[BASE]])
+  // CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %{{.*}}, <vscale x 4 x i32> %[[LOAD]], <vscale x 2 x i64> %[[ZERO]])
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svdupq,_n,_b32,)(x0, x1, x2, x3);
+}
+
+svbool_t test_svdupq_n_b64(bool x0, bool x1)
+{
+  // CHECK-LABEL: test_svdupq_n_b64
+  // CHECK: %[[ALLOCA:.*]] = alloca [2 x i64]
+  // CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i64
+  // CHECK-DAG: %[[X1:.*]] = zext i1 %x1 to i64
+  // CHECK: %[[BASE:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 0
+  // CHECK: store i64 %[[X0]], i64* %[[BASE]]
+  // CHECK: %[[GEP:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 1
+  // CHECK: store i64 %[[X1]], i64* %[[GEP]]
+  // CHECK-NOT: store
+  // CHECK: %[[PTRUE:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %{{.*}}, i64* nonnull %[[BASE]])
+  // CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %{{.*}}, <vscale x 2 x i64> %[[LOAD]], <vscale x 2 x i64> %[[ZERO]])
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svdupq,_n,_b64,)(x0, x1);
+}
Index: clang/include/clang/Basic/arm_sve.td
===================================================================
--- clang/include/clang/Basic/arm_sve.td
+++ clang/include/clang/Basic/arm_sve.td
@@ -864,6 +864,7 @@
 def SVCADD_M : SInst<"svcadd[_{d}]", "dPddi",  "hfd", MergeOp1,  "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
 def SVCMLA_M : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeOp1,  "aarch64_sve_fcmla", [], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
 
+def SVDUPQ_LANE  : SInst<"svdupq_lane[_{d}]", "ddn",  "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_dupq_lane">;
 
 ////////////////////////////////////////////////////////////////////////////////
 // Predicate creation