sdesmalen created this revision.
sdesmalen added reviewers: SjoerdMeijer, efriedma, rovka.
Herald added a subscriber: tschuett.
Herald added a project: clang.
sdesmalen added a parent revision: D77594: [SveEmitter] Add support for _n form 
builtins.
sdesmalen added a child revision: D77596: [SveEmitter] Add NoOverload flag and 
builtin for svpfalse.

Add the NoAuto flag for intrinsics that can't easily be code-generated
using the default overloaded type and are better handled as special
cases in a switch statement.

This patch also adds all svwhile builtins.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D77595

Files:
  clang/include/clang/Basic/TargetBuiltins.h
  clang/include/clang/Basic/arm_sve.td
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilele.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilelt.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilege.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilegt.c
  clang/utils/TableGen/SveEmitter.cpp

Index: clang/utils/TableGen/SveEmitter.cpp
===================================================================
--- clang/utils/TableGen/SveEmitter.cpp
+++ clang/utils/TableGen/SveEmitter.cpp
@@ -520,6 +520,13 @@
     Immediate = true;
     PredicatePattern = true;
     break;
+  case 'k':
+    Predicate = false;
+    Signed = true;
+    Float = false;
+    ElementBitwidth = Bitwidth = 32;
+    NumVectors = 0;
+    break;
   case 'l':
     Predicate = false;
     Signed = true;
@@ -527,6 +534,20 @@
     ElementBitwidth = Bitwidth = 64;
     NumVectors = 0;
     break;
+  case 'm':
+    Predicate = false;
+    Signed = false;
+    Float = false;
+    ElementBitwidth = Bitwidth = 32;
+    NumVectors = 0;
+    break;
+  case 'n':
+    Predicate = false;
+    Signed = false;
+    Float = false;
+    ElementBitwidth = Bitwidth = 64;
+    NumVectors = 0;
+    break;
   case 'S':
     Constant = true;
     Pointer = true;
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilegt.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilegt.c
@@ -0,0 +1,185 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilegt_b8_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b8_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_s32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b16_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b16_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_s32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b32_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b32_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_s32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b32,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b64_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b64_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_s32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b64,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b8_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b8_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_u32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b8,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b16_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b16_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_u32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b16,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b32_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b32_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_u32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b32,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b64_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b64_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_u32'}}
+  return SVE_ACLE_FUNC(svwhilegt_b64,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b8_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b8_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_s64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b8,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b16_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b16_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_s64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b16,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b32_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b32_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_s64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b32,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b64_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b64_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_s64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b64,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b8_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b8_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_u64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b8,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b16_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b16_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_u64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b16,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b32_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b32_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_u64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b32,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b64_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilegt_b64_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_u64'}}
+  return SVE_ACLE_FUNC(svwhilegt_b64,_u64,,)(op1, op2);
+}
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilege.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_whilege.c
@@ -0,0 +1,185 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilege_b8_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b8_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_s32'}}
+  return SVE_ACLE_FUNC(svwhilege_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b16_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_s32'}}
+  return SVE_ACLE_FUNC(svwhilege_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b32_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_s32'}}
+  return SVE_ACLE_FUNC(svwhilege_b32,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b64_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_s32'}}
+  return SVE_ACLE_FUNC(svwhilege_b64,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b8_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b8_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_u32'}}
+  return SVE_ACLE_FUNC(svwhilege_b8,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b16_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_u32'}}
+  return SVE_ACLE_FUNC(svwhilege_b16,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b32_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_u32'}}
+  return SVE_ACLE_FUNC(svwhilege_b32,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b64_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_u32'}}
+  return SVE_ACLE_FUNC(svwhilege_b64,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b8_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b8_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_s64'}}
+  return SVE_ACLE_FUNC(svwhilege_b8,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b16_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_s64'}}
+  return SVE_ACLE_FUNC(svwhilege_b16,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b32_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_s64'}}
+  return SVE_ACLE_FUNC(svwhilege_b32,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b64_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_s64'}}
+  return SVE_ACLE_FUNC(svwhilege_b64,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b8_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b8_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_u64'}}
+  return SVE_ACLE_FUNC(svwhilege_b8,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b16_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_u64'}}
+  return SVE_ACLE_FUNC(svwhilege_b16,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b32_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_u64'}}
+  return SVE_ACLE_FUNC(svwhilege_b32,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilege_b64_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_u64'}}
+  return SVE_ACLE_FUNC(svwhilege_b64,_u64,,)(op1, op2);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilelt.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilelt.c
@@ -0,0 +1,151 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilelt_b8_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b8_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilelt_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b16_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b32_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b32,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b64_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b64,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b8_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b8_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilelt_b8,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b16_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b16,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b32_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b32,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b64_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b64,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b8_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b8_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilelt_b8,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b16_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b16,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b32_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b32,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b64_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b64,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b8_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b8_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilelt_b8,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b16_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b16,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b32_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b32,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilelt_b64_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilelt_b64,_u64,,)(op1, op2);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilele.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_whilele.c
@@ -0,0 +1,151 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilele_b8_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b8_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilele_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b16_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b32_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b32,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_s32(int32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b64_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b64,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b8_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b8_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32 %op1, i32 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilele_b8,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b16_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b16,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b32_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b32,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_u32(uint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b64_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32 %op1, i32 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b64,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b8_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b8_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilele_b8,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b16_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b16,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b32_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b32,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_s64(int64_t op1, int64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b64_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b64,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b8_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b8_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 %op1, i64 %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svwhilele_b8,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b16_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b16,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b32_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b32,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_u64(uint64_t op1, uint64_t op2)
+{
+  // CHECK-LABEL: test_svwhilele_b64_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64 %op1, i64 %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+  return SVE_ACLE_FUNC(svwhilele_b64,_u64,,)(op1, op2);
+}
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -7625,7 +7625,7 @@
                              TypeFlags.isZExtReturn());
   else if (TypeFlags.isStore())
     return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
-  else if (Builtin->LLVMIntrinsic != 0) {
+  else if (Builtin->LLVMIntrinsic != 0 && !TypeFlags.isNoAuto()) {
     llvm::Type* OverloadedTy = getSVEType(TypeFlags);
 
     if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
@@ -7660,6 +7660,83 @@
 		return Call;
   }
 
+  switch (BuiltinID) {
+  default:
+    return nullptr;
+  case SVE::BI__builtin_sve_svwhilele_b8_s32:
+  case SVE::BI__builtin_sve_svwhilele_b8_s64:
+  case SVE::BI__builtin_sve_svwhilele_b8_u32:
+  case SVE::BI__builtin_sve_svwhilele_b8_u64:
+  case SVE::BI__builtin_sve_svwhilele_b16_s32:
+  case SVE::BI__builtin_sve_svwhilele_b16_s64:
+  case SVE::BI__builtin_sve_svwhilele_b16_u32:
+  case SVE::BI__builtin_sve_svwhilele_b16_u64:
+  case SVE::BI__builtin_sve_svwhilele_b32_s32:
+  case SVE::BI__builtin_sve_svwhilele_b32_s64:
+  case SVE::BI__builtin_sve_svwhilele_b32_u32:
+  case SVE::BI__builtin_sve_svwhilele_b32_u64:
+  case SVE::BI__builtin_sve_svwhilele_b64_s32:
+  case SVE::BI__builtin_sve_svwhilele_b64_s64:
+  case SVE::BI__builtin_sve_svwhilele_b64_u32:
+  case SVE::BI__builtin_sve_svwhilele_b64_u64:
+  case SVE::BI__builtin_sve_svwhilelt_b8_s32:
+  case SVE::BI__builtin_sve_svwhilelt_b8_s64:
+  case SVE::BI__builtin_sve_svwhilelt_b8_u32:
+  case SVE::BI__builtin_sve_svwhilelt_b8_u64:
+  case SVE::BI__builtin_sve_svwhilelt_b16_s32:
+  case SVE::BI__builtin_sve_svwhilelt_b16_s64:
+  case SVE::BI__builtin_sve_svwhilelt_b16_u32:
+  case SVE::BI__builtin_sve_svwhilelt_b16_u64:
+  case SVE::BI__builtin_sve_svwhilelt_b32_s32:
+  case SVE::BI__builtin_sve_svwhilelt_b32_s64:
+  case SVE::BI__builtin_sve_svwhilelt_b32_u32:
+  case SVE::BI__builtin_sve_svwhilelt_b32_u64:
+  case SVE::BI__builtin_sve_svwhilelt_b64_s32:
+  case SVE::BI__builtin_sve_svwhilelt_b64_s64:
+  case SVE::BI__builtin_sve_svwhilelt_b64_u32:
+  case SVE::BI__builtin_sve_svwhilelt_b64_u64:
+  case SVE::BI__builtin_sve_svwhilege_b8_s32:
+  case SVE::BI__builtin_sve_svwhilege_b8_s64:
+  case SVE::BI__builtin_sve_svwhilege_b8_u32:
+  case SVE::BI__builtin_sve_svwhilege_b8_u64:
+  case SVE::BI__builtin_sve_svwhilege_b16_s32:
+  case SVE::BI__builtin_sve_svwhilege_b16_s64:
+  case SVE::BI__builtin_sve_svwhilege_b16_u32:
+  case SVE::BI__builtin_sve_svwhilege_b16_u64:
+  case SVE::BI__builtin_sve_svwhilege_b32_s32:
+  case SVE::BI__builtin_sve_svwhilege_b32_s64:
+  case SVE::BI__builtin_sve_svwhilege_b32_u32:
+  case SVE::BI__builtin_sve_svwhilege_b32_u64:
+  case SVE::BI__builtin_sve_svwhilege_b64_s32:
+  case SVE::BI__builtin_sve_svwhilege_b64_s64:
+  case SVE::BI__builtin_sve_svwhilege_b64_u32:
+  case SVE::BI__builtin_sve_svwhilege_b64_u64:
+  case SVE::BI__builtin_sve_svwhilegt_b8_s32:
+  case SVE::BI__builtin_sve_svwhilegt_b8_s64:
+  case SVE::BI__builtin_sve_svwhilegt_b8_u32:
+  case SVE::BI__builtin_sve_svwhilegt_b8_u64:
+  case SVE::BI__builtin_sve_svwhilegt_b16_s32:
+  case SVE::BI__builtin_sve_svwhilegt_b16_s64:
+  case SVE::BI__builtin_sve_svwhilegt_b16_u32:
+  case SVE::BI__builtin_sve_svwhilegt_b16_u64:
+  case SVE::BI__builtin_sve_svwhilegt_b32_s32:
+  case SVE::BI__builtin_sve_svwhilegt_b32_s64:
+  case SVE::BI__builtin_sve_svwhilegt_b32_u32:
+  case SVE::BI__builtin_sve_svwhilegt_b32_u64:
+  case SVE::BI__builtin_sve_svwhilegt_b64_s32:
+  case SVE::BI__builtin_sve_svwhilegt_b64_s64:
+  case SVE::BI__builtin_sve_svwhilegt_b64_u32:
+  case SVE::BI__builtin_sve_svwhilegt_b64_u64: {
+    SVETypeFlags TypeFlags(Builtin->TypeModifier);
+    llvm::Type *RetTy = getSVEType(TypeFlags);
+    llvm::Type *ArgTypes[] = {RetTy, Ops[1]->getType()};
+
+    Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic, ArgTypes);
+    Value *Call = Builder.CreateCall(F, Ops);
+    return EmitSVEPredicateCast(Call, cast<llvm::VectorType>(Ty));
+  }
+  }
+
   /// Should not happen
   return nullptr;
 }
Index: clang/include/clang/Basic/arm_sve.td
===================================================================
--- clang/include/clang/Basic/arm_sve.td
+++ clang/include/clang/Basic/arm_sve.td
@@ -69,6 +69,10 @@
 // o: 4x width elements, 1/4 element count
 //
 // i: constant uint64_t
+// k: int32_t
+// l: int64_t
+// m: uint32_t
+// n: uint64_t
 //
 // I: Predicate Pattern (sv_pattern)
 
@@ -162,6 +166,7 @@
 def IsStructLoad              : FlagType<0x00020000>;
 def IsStructStore             : FlagType<0x00040000>;
 def IsZExtReturn              : FlagType<0x00080000>; // Return value is sign-extend by default
+def NoAuto                    : FlagType<0x00100000>;
 
 // These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
 class ImmCheckType<int val> {
@@ -363,6 +368,18 @@
 def SVQSHLU_M  : SInst<"svqshlu[_n_{d}]", "uPdi", "csil",         MergeOp1,  "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft,  1>]>;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+// While comparisons
+
+def SVWHILELE_S32 : SInst<"svwhilele_{d}[_{1}]", "Pkk", "PcPsPiPl",     MergeNone, "aarch64_sve_whilele", [NoAuto]>;
+def SVWHILELE_S64 : SInst<"svwhilele_{d}[_{1}]", "Pll", "PcPsPiPl",     MergeNone, "aarch64_sve_whilele", [NoAuto]>;
+def SVWHILELO_U32 : SInst<"svwhilelt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [NoAuto]>;
+def SVWHILELO_U64 : SInst<"svwhilelt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [NoAuto]>;
+def SVWHILELS_U32 : SInst<"svwhilele_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [NoAuto]>;
+def SVWHILELS_U64 : SInst<"svwhilele_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [NoAuto]>;
+def SVWHILELT_S32 : SInst<"svwhilelt_{d}[_{1}]", "Pkk", "PcPsPiPl",     MergeNone, "aarch64_sve_whilelt", [NoAuto]>;
+def SVWHILELT_S64 : SInst<"svwhilelt_{d}[_{1}]", "Pll", "PcPsPiPl",     MergeNone, "aarch64_sve_whilelt", [NoAuto]>;
+
 ////////////////////////////////////////////////////////////////////////////////
 // Floating-point arithmetic
 
@@ -387,3 +404,16 @@
 // Integer arithmetic
 def SVDOT_LANE_S : SInst<"svdot_lane[_{d}]",  "ddqqi",  "il",   MergeNone, "aarch64_sve_sdot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
 def SVDOT_LANE_U : SInst<"svdot_lane[_{d}]",  "ddqqi",  "UiUl", MergeNone, "aarch64_sve_udot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 WhileGE/GT
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVWHILEGE_S32 : SInst<"svwhilege_{d}[_{1}]", "Pkk", "PcPsPiPl",     MergeNone, "aarch64_sve_whilege", [NoAuto]>;
+def SVWHILEGE_S64 : SInst<"svwhilege_{d}[_{1}]", "Pll", "PcPsPiPl",     MergeNone, "aarch64_sve_whilege", [NoAuto]>;
+def SVWHILEGT_S32 : SInst<"svwhilegt_{d}[_{1}]", "Pkk", "PcPsPiPl",     MergeNone, "aarch64_sve_whilegt", [NoAuto]>;
+def SVWHILEGT_S64 : SInst<"svwhilegt_{d}[_{1}]", "Pll", "PcPsPiPl",     MergeNone, "aarch64_sve_whilegt", [NoAuto]>;
+def SVWHILEHI_U32 : SInst<"svwhilegt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [NoAuto]>;
+def SVWHILEHI_U64 : SInst<"svwhilegt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [NoAuto]>;
+def SVWHILEHS_U32 : SInst<"svwhilege_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [NoAuto]>;
+def SVWHILEHS_U64 : SInst<"svwhilege_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [NoAuto]>;
+}
Index: clang/include/clang/Basic/TargetBuiltins.h
===================================================================
--- clang/include/clang/Basic/TargetBuiltins.h
+++ clang/include/clang/Basic/TargetBuiltins.h
@@ -232,6 +232,7 @@
     bool isStructLoad() const { return Flags & IsStructLoad; }
     bool isStructStore() const { return Flags & IsStructStore; }
     bool isZExtReturn() const { return Flags & IsZExtReturn; }
+    bool isNoAuto() const { return Flags & NoAuto; }
 
     uint64_t getBits() const { return Flags; }
     bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to