sdesmalen updated this revision to Diff 257829.
sdesmalen retitled this revision from "[SveEmitter] Add NoAuto flag and builtins for svwhile." to "[SveEmitter] Add builtins for svwhile".
sdesmalen edited the summary of this revision.
sdesmalen added a comment.

- The function that returns a list of overloaded types for the intrinsics (added in D77596 <https://reviews.llvm.org/D77596>) means we no longer need custom codegen for the while intrinsics.
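
As an illustration (not part of the patch): with the new IsOverloadWhile flag, the emitter derives the overload types as {result predicate type, type of the scalar operands}, so the builtin lowers to the overloaded LLVM intrinsic through the common SVE codegen path. The wrapper function name below is hypothetical; the builtin and the intrinsic it maps to are taken from the tests in this patch.

  #include <arm_sve.h>

  // Build a predicate that is true for byte lanes [i, n).
  svbool_t active_lanes(int64_t i, int64_t n) {
    // Lowers to @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 %i, i64 %n)
    // without any custom handling in CGBuiltin.cpp.
    return svwhilelt_b8_s64(i, n);
  }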


CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D77595/new/

https://reviews.llvm.org/D77595

Files:
  clang/include/clang/Basic/TargetBuiltins.h
  clang/include/clang/Basic/arm_sve.td
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilele.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilelt.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilege.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilegt.c

Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilegt.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilegt.c
@@ -0,0 +1,185 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilegt_b8_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b8_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_s32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b16_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b16_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_s32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b32_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b32_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_s32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b32,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b64_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b64_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_s32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b64,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b8_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b8_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_u32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b8,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b16_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b16_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_u32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b16,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b32_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b32_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_u32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b32,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b64_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b64_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_u32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b64,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b8_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b8_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_s64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b8,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b16_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b16_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_s64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b16,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b32_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b32_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_s64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b32,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b64_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b64_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_s64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b64,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b8_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b8_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_u64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b8,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b16_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b16_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_u64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b16,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b32_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b32_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_u64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b32,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b64_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b64_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_u64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b64,_u64,,)(op1, op2);
+}
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilege.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilege.c
@@ -0,0 +1,185 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilege_b8_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b8_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_s32'}}
+  return SVE_ACLE_FUNC(svwhilege_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b16_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_s32'}}
+  return SVE_ACLE_FUNC(svwhilege_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b32_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_s32'}}
+  return SVE_ACLE_FUNC(svwhilege_b32,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b64_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_s32'}}
+  return SVE_ACLE_FUNC(svwhilege_b64,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b8_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b8_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_u32'}}
+  return SVE_ACLE_FUNC(svwhilege_b8,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b16_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_u32'}}
+  return SVE_ACLE_FUNC(svwhilege_b16,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b32_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_u32'}}
+  return SVE_ACLE_FUNC(svwhilege_b32,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b64_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_u32'}}
+  return SVE_ACLE_FUNC(svwhilege_b64,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b8_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b8_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_s64'}}
+  return SVE_ACLE_FUNC(svwhilege_b8,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b16_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_s64'}}
+  return SVE_ACLE_FUNC(svwhilege_b16,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b32_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_s64'}}
+  return SVE_ACLE_FUNC(svwhilege_b32,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b64_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_s64'}}
+  return SVE_ACLE_FUNC(svwhilege_b64,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b8_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b8_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_u64'}}
+  return SVE_ACLE_FUNC(svwhilege_b8,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b16_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_u64'}}
+  return SVE_ACLE_FUNC(svwhilege_b16,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b32_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_u64'}}
+  return SVE_ACLE_FUNC(svwhilege_b32,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b64_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_u64'}}
+  return SVE_ACLE_FUNC(svwhilege_b64,_u64,,)(op1, op2);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilelt.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilelt.c
@@ -0,0 +1,151 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilelt_b8_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b8_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilelt_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b16_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b32_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b32,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b64_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b64,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b8_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b8_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilelt_b8,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b16_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b16,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b32_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b32,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b64_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b64,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b8_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b8_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilelt_b8,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b16_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b16,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b32_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b32,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b64_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b64,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b8_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b8_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilelt_b8,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b16_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b16,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b32_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b32,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b64_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b64,_u64,,)(op1, op2);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilele.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilele.c
@@ -0,0 +1,151 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilele_b8_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b8_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilele_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b16_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b32_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b32,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b64_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b64,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b8_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b8_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilele_b8,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b16_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b16,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b32_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b32,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b64_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b64,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b8_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b8_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilele_b8,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b16_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b16,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b32_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b32,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b64_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b64,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b8_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b8_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilele_b8,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b16_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b16,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b32_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b32,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b64_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b64,_u64,,)(op1, op2);
+}
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -7597,6 +7597,9 @@
 
   llvm::Type *DefaultType = getSVEType(TypeFlags);
 
+  if (TypeFlags.isOverloadWhile())
+    return {DefaultType, Ops[1]->getType()};
+
   assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
   return {DefaultType};
 }
Index: clang/include/clang/Basic/arm_sve.td
===================================================================
--- clang/include/clang/Basic/arm_sve.td
+++ clang/include/clang/Basic/arm_sve.td
@@ -167,6 +167,7 @@
 def IsStructStore             : FlagType<0x00040000>;
 def IsZExtReturn              : FlagType<0x00080000>; // Return value is sign-extend by default
 def IsOverloadNone            : FlagType<0x00100000>; // Intrinsic does not take any overloaded types.
+def IsOverloadWhile           : FlagType<0x00200000>; // Use {default type, typeof(operand1)} as overloaded types.
 def OverloadKindMask          : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as overload type.
 
 // These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
@@ -369,6 +370,18 @@
 def SVQSHLU_M  : SInst<"svqshlu[_n_{d}]", "uPdi", "csil",         MergeOp1,  "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft,  1>]>;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+// While comparisons
+
+def SVWHILELE_S32 : SInst<"svwhilele_{d}[_{1}]", "Pkk", "PcPsPiPl",     MergeNone, "aarch64_sve_whilele", [IsOverloadWhile]>;
+def SVWHILELE_S64 : SInst<"svwhilele_{d}[_{1}]", "Pll", "PcPsPiPl",     MergeNone, "aarch64_sve_whilele", [IsOverloadWhile]>;
+def SVWHILELO_U32 : SInst<"svwhilelt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [IsOverloadWhile]>;
+def SVWHILELO_U64 : SInst<"svwhilelt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [IsOverloadWhile]>;
+def SVWHILELS_U32 : SInst<"svwhilele_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [IsOverloadWhile]>;
+def SVWHILELS_U64 : SInst<"svwhilele_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [IsOverloadWhile]>;
+def SVWHILELT_S32 : SInst<"svwhilelt_{d}[_{1}]", "Pkk", "PcPsPiPl",     MergeNone, "aarch64_sve_whilelt", [IsOverloadWhile]>;
+def SVWHILELT_S64 : SInst<"svwhilelt_{d}[_{1}]", "Pll", "PcPsPiPl",     MergeNone, "aarch64_sve_whilelt", [IsOverloadWhile]>;
+
 ////////////////////////////////////////////////////////////////////////////////
 // Floating-point arithmetic
 
@@ -407,3 +420,16 @@
 // Integer arithmetic
 def SVDOT_LANE_S : SInst<"svdot_lane[_{d}]",  "ddqqi",  "il",   MergeNone, "aarch64_sve_sdot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
 def SVDOT_LANE_U : SInst<"svdot_lane[_{d}]",  "ddqqi",  "UiUl", MergeNone, "aarch64_sve_udot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 WhileGE/GT
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVWHILEGE_S32 : SInst<"svwhilege_{d}[_{1}]", "Pkk", "PcPsPiPl",     MergeNone, "aarch64_sve_whilege", [IsOverloadWhile]>;
+def SVWHILEGE_S64 : SInst<"svwhilege_{d}[_{1}]", "Pll", "PcPsPiPl",     MergeNone, "aarch64_sve_whilege", [IsOverloadWhile]>;
+def SVWHILEGT_S32 : SInst<"svwhilegt_{d}[_{1}]", "Pkk", "PcPsPiPl",     MergeNone, "aarch64_sve_whilegt", [IsOverloadWhile]>;
+def SVWHILEGT_S64 : SInst<"svwhilegt_{d}[_{1}]", "Pll", "PcPsPiPl",     MergeNone, "aarch64_sve_whilegt", [IsOverloadWhile]>;
+def SVWHILEHI_U32 : SInst<"svwhilegt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [IsOverloadWhile]>;
+def SVWHILEHI_U64 : SInst<"svwhilegt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [IsOverloadWhile]>;
+def SVWHILEHS_U32 : SInst<"svwhilege_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [IsOverloadWhile]>;
+def SVWHILEHS_U64 : SInst<"svwhilege_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [IsOverloadWhile]>;
+}
Index: clang/include/clang/Basic/TargetBuiltins.h
===================================================================
--- clang/include/clang/Basic/TargetBuiltins.h
+++ clang/include/clang/Basic/TargetBuiltins.h
@@ -233,6 +233,7 @@
     bool isStructStore() const { return Flags & IsStructStore; }
     bool isZExtReturn() const { return Flags & IsZExtReturn; }
     bool isOverloadNone() const { return Flags & IsOverloadNone; }
+    bool isOverloadWhile() const { return Flags & IsOverloadWhile; }
     bool isOverloadDefault() const { return !(Flags & OverloadKindMask); }
 
     uint64_t getBits() const { return Flags; }
