sdesmalen updated this revision to Diff 258426.
sdesmalen added a comment.

- Added a comment to CGBuiltin explaining that the special case for
  IsFPConvert will be removed in a follow-up patch.
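
For context, a minimal sketch of the behaviour under review (illustrative
only: the function name `widen` is mine; the types and intrinsics come from
the generated arm_sve.h and the new tests below):

  #include <arm_sve.h>

  svfloat32_t widen(svfloat32_t inactive, svbool_t pg, svfloat16_t op) {
    // With IsOverloadCvt, the overloaded types are taken from operand 0
    // (inactive) and the last operand (op). Because of the IsFPConvert
    // special case, %pg is passed to @llvm.aarch64.sve.fcvt.f32f16 as
    // <vscale x 16 x i1> rather than being narrowed to <vscale x 4 x i1>.
    return svcvt_f32_f16_m(inactive, pg, op);
  }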


CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D78239/new/

https://reviews.llvm.org/D78239

Files:
  clang/include/clang/Basic/TargetBuiltins.h
  clang/include/clang/Basic/arm_sve.td
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cvt.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtlt.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtnt.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtx.c
  clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtxnt.c
  clang/utils/TableGen/SveEmitter.cpp

Index: clang/utils/TableGen/SveEmitter.cpp
===================================================================
--- clang/utils/TableGen/SveEmitter.cpp
+++ clang/utils/TableGen/SveEmitter.cpp
@@ -556,6 +556,21 @@
     ElementBitwidth = Bitwidth = 64;
     NumVectors = 0;
     break;
+  case 'O':
+    Predicate = false;
+    Float = true;
+    ElementBitwidth = 16;
+    break;
+  case 'M':
+    Predicate = false;
+    Float = true;
+    ElementBitwidth = 32;
+    break;
+  case 'N':
+    Predicate = false;
+    Float = true;
+    ElementBitwidth = 64;
+    break;
   case 'S':
     Constant = true;
     Pointer = true;
@@ -1010,6 +1025,17 @@
   if (!InGuard.empty())
     OS << "#endif  //" << InGuard << "\n";
 
+  OS << "#if defined(__ARM_FEATURE_SVE2)\n";
+  OS << "#define svcvtnt_f16_x      svcvtnt_f16_m\n";
+  OS << "#define svcvtnt_f16_f32_x  svcvtnt_f16_f32_m\n";
+  OS << "#define svcvtnt_f32_x      svcvtnt_f32_m\n";
+  OS << "#define svcvtnt_f32_f64_x  svcvtnt_f32_f64_m\n\n";
+
+  OS << "#define svcvtxnt_f32_x     svcvtxnt_f32_m\n";
+  OS << "#define svcvtxnt_f32_f64_x svcvtxnt_f32_f64_m\n\n";
+
+  OS << "#endif /*__ARM_FEATURE_SVE2 */\n\n";
+
   OS << "#ifdef __cplusplus\n";
   OS << "} // extern \"C\"\n";
   OS << "#endif\n\n";
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtxnt.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtxnt.c
@@ -0,0 +1,33 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svfloat32_t test_svcvtxnt_f32_f64_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvtxnt_f32_f64_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvtxnt.f32f64(<vscale x 4 x float> %inactive, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtxnt_f32_m'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtxnt_f32_f64_m'}}
+  return SVE_ACLE_FUNC(svcvtxnt_f32,_f64,_m,)(inactive, pg, op);
+}
+
+svfloat32_t test_svcvtxnt_f32_f64_x(svfloat32_t even, svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvtxnt_f32_f64_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvtxnt.f32f64(<vscale x 4 x float> %even, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtxnt_f32_x'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtxnt_f32_f64_x'}}
+  return SVE_ACLE_FUNC(svcvtxnt_f32,_f64,_x,)(even, pg, op);
+}
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtx.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtx.c
@@ -0,0 +1,43 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svfloat32_t test_svcvtx_f32_f64_z(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvtx_f32_f64_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvtx.f32f64(<vscale x 4 x float> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtx_f32_z'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtx_f32_f64_z'}}
+  return SVE_ACLE_FUNC(svcvtx_f32,_f64,_z,)(pg, op);
+}
+
+svfloat32_t test_svcvtx_f32_f64_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvtx_f32_f64_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvtx.f32f64(<vscale x 4 x float> %inactive, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtx_f32_m'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtx_f32_f64_m'}}
+  return SVE_ACLE_FUNC(svcvtx_f32,_f64,_m,)(inactive, pg, op);
+}
+
+svfloat32_t test_svcvtx_f32_f64_x(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvtx_f32_f64_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvtx.f32f64(<vscale x 4 x float> undef, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtx_f32_x'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtx_f32_f64_x'}}
+  return SVE_ACLE_FUNC(svcvtx_f32,_f64,_x,)(pg, op);
+}
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtnt.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtnt.c
@@ -0,0 +1,53 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svfloat16_t test_svcvtnt_f16_f32_m(svfloat16_t inactive, svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvtnt_f16_f32_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fcvtnt.f16f32(<vscale x 8 x half> %inactive, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtnt_f16_m'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtnt_f16_f32_m'}}
+  return SVE_ACLE_FUNC(svcvtnt_f16,_f32,_m,)(inactive, pg, op);
+}
+
+svfloat32_t test_svcvtnt_f32_f64_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvtnt_f32_f64_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvtnt.f32f64(<vscale x 4 x float> %inactive, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtnt_f32_m'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtnt_f32_f64_m'}}
+  return SVE_ACLE_FUNC(svcvtnt_f32,_f64,_m,)(inactive, pg, op);
+}
+
+svfloat16_t test_svcvtnt_f16_f32_x(svfloat16_t even, svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvtnt_f16_f32_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fcvtnt.f16f32(<vscale x 8 x half> %even, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtnt_f16_x'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtnt_f16_f32_x'}}
+  return SVE_ACLE_FUNC(svcvtnt_f16,_f32,_x,)(even, pg, op);
+}
+
+svfloat32_t test_svcvtnt_f32_f64_x(svfloat32_t even, svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvtnt_f32_f64_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvtnt.f32f64(<vscale x 4 x float> %even, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtnt_f32_x'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtnt_f32_f64_x'}}
+  return SVE_ACLE_FUNC(svcvtnt_f32,_f64,_x,)(even, pg, op);
+}
Index: clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtlt.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_cvtlt.c
@@ -0,0 +1,53 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svfloat32_t test_svcvtlt_f32_f16_m(svfloat32_t inactive, svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvtlt_f32_f16_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvtlt.f32f16(<vscale x 4 x float> %inactive, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtlt_f32_m'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtlt_f32_f16_m'}}
+  return SVE_ACLE_FUNC(svcvtlt_f32,_f16,_m,)(inactive, pg, op);
+}
+
+svfloat64_t test_svcvtlt_f64_f32_m(svfloat64_t inactive, svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvtlt_f64_f32_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fcvtlt.f64f32(<vscale x 2 x double> %inactive, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtlt_f64_m'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtlt_f64_f32_m'}}
+  return SVE_ACLE_FUNC(svcvtlt_f64,_f32,_m,)(inactive, pg, op);
+}
+
+svfloat32_t test_svcvtlt_f32_f16_x(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvtlt_f32_f16_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvtlt.f32f16(<vscale x 4 x float> undef, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtlt_f32_x'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtlt_f32_f16_x'}}
+  return SVE_ACLE_FUNC(svcvtlt_f32,_f16,_x,)(pg, op);
+}
+
+svfloat64_t test_svcvtlt_f64_f32_x(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvtlt_f64_f32_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fcvtlt.f64f32(<vscale x 2 x double> undef, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svcvtlt_f64_x'}}
+  // expected-warning@+1 {{implicit declaration of function 'svcvtlt_f64_f32_x'}}
+  return SVE_ACLE_FUNC(svcvtlt_f64,_f32,_x,)(pg, op);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cvt.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cvt.c
@@ -0,0 +1,809 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svcvt_s16_f16_z(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_s16_f16_z
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzs.nxv8i16.nxv8f16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s16,_f16,_z,)(pg, op);
+}
+
+svint16_t test_svcvt_s16_f16_m(svint16_t inactive, svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_s16_f16_m
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzs.nxv8i16.nxv8f16(<vscale x 8 x i16> %inactive, <vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s16,_f16,_m,)(inactive, pg, op);
+}
+
+svint16_t test_svcvt_s16_f16_x(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_s16_f16_x
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzs.nxv8i16.nxv8f16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s16,_f16,_x,)(pg, op);
+}
+
+svuint16_t test_svcvt_u16_f16_z(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_u16_f16_z
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzu.nxv8i16.nxv8f16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u16,_f16,_z,)(pg, op);
+}
+
+svuint16_t test_svcvt_u16_f16_m(svuint16_t inactive, svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_u16_f16_m
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzu.nxv8i16.nxv8f16(<vscale x 8 x i16> %inactive, <vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u16,_f16,_m,)(inactive, pg, op);
+}
+
+svuint16_t test_svcvt_u16_f16_x(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_u16_f16_x
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzu.nxv8i16.nxv8f16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u16,_f16,_x,)(pg, op);
+}
+
+svint32_t test_svcvt_s32_f16_z(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_s32_f16_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f16(<vscale x 4 x i32> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s32,_f16,_z,)(pg, op);
+}
+
+svint32_t test_svcvt_s32_f32_z(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_s32_f32_z
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.nxv4i32.nxv4f32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s32,_f32,_z,)(pg, op);
+}
+
+svint32_t test_svcvt_s32_f64_z(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_s32_f64_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f64(<vscale x 4 x i32> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s32,_f64,_z,)(pg, op);
+}
+
+svint32_t test_svcvt_s32_f16_m(svint32_t inactive, svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_s32_f16_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f16(<vscale x 4 x i32> %inactive, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s32,_f16,_m,)(inactive, pg, op);
+}
+
+svint32_t test_svcvt_s32_f32_m(svint32_t inactive, svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_s32_f32_m
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.nxv4i32.nxv4f32(<vscale x 4 x i32> %inactive, <vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s32,_f32,_m,)(inactive, pg, op);
+}
+
+svint32_t test_svcvt_s32_f64_m(svint32_t inactive, svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_s32_f64_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f64(<vscale x 4 x i32> %inactive, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s32,_f64,_m,)(inactive, pg, op);
+}
+
+svint32_t test_svcvt_s32_f16_x(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_s32_f16_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f16(<vscale x 4 x i32> undef, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s32,_f16,_x,)(pg, op);
+}
+
+svint32_t test_svcvt_s32_f32_x(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_s32_f32_x
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.nxv4i32.nxv4f32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s32,_f32,_x,)(pg, op);
+}
+
+svint32_t test_svcvt_s32_f64_x(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_s32_f64_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f64(<vscale x 4 x i32> undef, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s32,_f64,_x,)(pg, op);
+}
+
+svint64_t test_svcvt_s64_f16_z(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_s64_f16_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f16(<vscale x 2 x i64> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s64,_f16,_z,)(pg, op);
+}
+
+svint64_t test_svcvt_s64_f32_z(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_s64_f32_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f32(<vscale x 2 x i64> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s64,_f32,_z,)(pg, op);
+}
+
+svint64_t test_svcvt_s64_f64_z(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_s64_f64_z
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.nxv2i64.nxv2f64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s64,_f64,_z,)(pg, op);
+}
+
+svint64_t test_svcvt_s64_f16_m(svint64_t inactive, svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_s64_f16_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f16(<vscale x 2 x i64> %inactive, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s64,_f16,_m,)(inactive, pg, op);
+}
+
+svint64_t test_svcvt_s64_f32_m(svint64_t inactive, svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_s64_f32_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f32(<vscale x 2 x i64> %inactive, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s64,_f32,_m,)(inactive, pg, op);
+}
+
+svint64_t test_svcvt_s64_f64_m(svint64_t inactive, svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_s64_f64_m
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.nxv2i64.nxv2f64(<vscale x 2 x i64> %inactive, <vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s64,_f64,_m,)(inactive, pg, op);
+}
+
+svint64_t test_svcvt_s64_f16_x(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_s64_f16_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f16(<vscale x 2 x i64> undef, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s64,_f16,_x,)(pg, op);
+}
+
+svint64_t test_svcvt_s64_f32_x(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_s64_f32_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f32(<vscale x 2 x i64> undef, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s64,_f32,_x,)(pg, op);
+}
+
+svint64_t test_svcvt_s64_f64_x(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_s64_f64_x
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.nxv2i64.nxv2f64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_s64,_f64,_x,)(pg, op);
+}
+
+svuint32_t test_svcvt_u32_f16_z(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_u32_f16_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f16(<vscale x 4 x i32> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u32,_f16,_z,)(pg, op);
+}
+
+svuint32_t test_svcvt_u32_f32_z(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_u32_f32_z
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.nxv4i32.nxv4f32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u32,_f32,_z,)(pg, op);
+}
+
+svuint32_t test_svcvt_u32_f64_z(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_u32_f64_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f64(<vscale x 4 x i32> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u32,_f64,_z,)(pg, op);
+}
+
+svuint32_t test_svcvt_u32_f16_m(svuint32_t inactive, svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_u32_f16_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f16(<vscale x 4 x i32> %inactive, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u32,_f16,_m,)(inactive, pg, op);
+}
+
+svuint32_t test_svcvt_u32_f32_m(svuint32_t inactive, svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_u32_f32_m
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.nxv4i32.nxv4f32(<vscale x 4 x i32> %inactive, <vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u32,_f32,_m,)(inactive, pg, op);
+}
+
+svuint32_t test_svcvt_u32_f64_m(svuint32_t inactive, svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_u32_f64_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f64(<vscale x 4 x i32> %inactive, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u32,_f64,_m,)(inactive, pg, op);
+}
+
+svuint32_t test_svcvt_u32_f16_x(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_u32_f16_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f16(<vscale x 4 x i32> undef, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u32,_f16,_x,)(pg, op);
+}
+
+svuint32_t test_svcvt_u32_f32_x(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_u32_f32_x
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.nxv4i32.nxv4f32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u32,_f32,_x,)(pg, op);
+}
+
+svuint32_t test_svcvt_u32_f64_x(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_u32_f64_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f64(<vscale x 4 x i32> undef, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u32,_f64,_x,)(pg, op);
+}
+
+svuint64_t test_svcvt_u64_f16_z(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_u64_f16_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f16(<vscale x 2 x i64> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u64,_f16,_z,)(pg, op);
+}
+
+svuint64_t test_svcvt_u64_f32_z(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_u64_f32_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f32(<vscale x 2 x i64> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u64,_f32,_z,)(pg, op);
+}
+
+svuint64_t test_svcvt_u64_f64_z(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_u64_f64_z
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.nxv2i64.nxv2f64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u64,_f64,_z,)(pg, op);
+}
+
+svuint64_t test_svcvt_u64_f16_m(svuint64_t inactive, svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_u64_f16_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f16(<vscale x 2 x i64> %inactive, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u64,_f16,_m,)(inactive, pg, op);
+}
+
+svuint64_t test_svcvt_u64_f32_m(svuint64_t inactive, svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_u64_f32_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f32(<vscale x 2 x i64> %inactive, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u64,_f32,_m,)(inactive, pg, op);
+}
+
+svuint64_t test_svcvt_u64_f64_m(svuint64_t inactive, svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_u64_f64_m
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.nxv2i64.nxv2f64(<vscale x 2 x i64> %inactive, <vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u64,_f64,_m,)(inactive, pg, op);
+}
+
+svuint64_t test_svcvt_u64_f16_x(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_u64_f16_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f16(<vscale x 2 x i64> undef, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u64,_f16,_x,)(pg, op);
+}
+
+svuint64_t test_svcvt_u64_f32_x(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_u64_f32_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f32(<vscale x 2 x i64> undef, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u64,_f32,_x,)(pg, op);
+}
+
+svuint64_t test_svcvt_u64_f64_x(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_u64_f64_x
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.nxv2i64.nxv2f64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_u64,_f64,_x,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_s32_z(svbool_t pg, svint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_s32_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i32(<vscale x 8 x half> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_s32,_z,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_s32_z(svbool_t pg, svint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_s32_z
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_s32,_z,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_s32_z(svbool_t pg, svint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_s32_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.f64i32(<vscale x 2 x double> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_s32,_z,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_s32_m(svfloat16_t inactive, svbool_t pg, svint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_s32_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i32(<vscale x 8 x half> %inactive, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_s32,_m,)(inactive, pg, op);
+}
+
+svfloat32_t test_svcvt_f32_s32_m(svfloat32_t inactive, svbool_t pg, svint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_s32_m
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32(<vscale x 4 x float> %inactive, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_s32,_m,)(inactive, pg, op);
+}
+
+svfloat64_t test_svcvt_f64_s32_m(svfloat64_t inactive, svbool_t pg, svint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_s32_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.f64i32(<vscale x 2 x double> %inactive, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_s32,_m,)(inactive, pg, op);
+}
+
+svfloat16_t test_svcvt_f16_s32_x(svbool_t pg, svint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_s32_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i32(<vscale x 8 x half> undef, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_s32,_x,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_s32_x(svbool_t pg, svint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_s32_x
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32(<vscale x 4 x float> undef, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_s32,_x,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_s32_x(svbool_t pg, svint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_s32_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.f64i32(<vscale x 2 x double> undef, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_s32,_x,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_s64_z(svbool_t pg, svint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_s64_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i64(<vscale x 8 x half> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_s64,_z,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_s64_z(svbool_t pg, svint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_s64_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.f32i64(<vscale x 4 x float> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_s64,_z,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_s64_z(svbool_t pg, svint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_s64_z
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_s64,_z,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_s64_m(svfloat16_t inactive, svbool_t pg, svint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_s64_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i64(<vscale x 8 x half> %inactive, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_s64,_m,)(inactive, pg, op);
+}
+
+svfloat32_t test_svcvt_f32_s64_m(svfloat32_t inactive, svbool_t pg, svint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_s64_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.f32i64(<vscale x 4 x float> %inactive, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_s64,_m,)(inactive, pg, op);
+}
+
+svfloat64_t test_svcvt_f64_s64_m(svfloat64_t inactive, svbool_t pg, svint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_s64_m
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64(<vscale x 2 x double> %inactive, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_s64,_m,)(inactive, pg, op);
+}
+
+svfloat16_t test_svcvt_f16_s64_x(svbool_t pg, svint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_s64_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i64(<vscale x 8 x half> undef, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_s64,_x,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_s64_x(svbool_t pg, svint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_s64_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.f32i64(<vscale x 4 x float> undef, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_s64,_x,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_s64_x(svbool_t pg, svint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_s64_x
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64(<vscale x 2 x double> undef, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_s64,_x,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_u32_z(svbool_t pg, svuint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_u32_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i32(<vscale x 8 x half> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_u32,_z,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_u32_z(svbool_t pg, svuint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_u32_z
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_u32,_z,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_u32_z(svbool_t pg, svuint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_u32_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.f64i32(<vscale x 2 x double> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_u32,_z,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_u32_m(svfloat16_t inactive, svbool_t pg, svuint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_u32_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i32(<vscale x 8 x half> %inactive, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_u32,_m,)(inactive, pg, op);
+}
+
+svfloat32_t test_svcvt_f32_u32_m(svfloat32_t inactive, svbool_t pg, svuint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_u32_m
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32(<vscale x 4 x float> %inactive, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_u32,_m,)(inactive, pg, op);
+}
+
+svfloat64_t test_svcvt_f64_u32_m(svfloat64_t inactive, svbool_t pg, svuint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_u32_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.f64i32(<vscale x 2 x double> %inactive, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_u32,_m,)(inactive, pg, op);
+}
+
+svfloat16_t test_svcvt_f16_u32_x(svbool_t pg, svuint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_u32_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i32(<vscale x 8 x half> undef, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_u32,_x,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_u32_x(svbool_t pg, svuint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_u32_x
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32(<vscale x 4 x float> undef, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_u32,_x,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_u32_x(svbool_t pg, svuint32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_u32_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.f64i32(<vscale x 2 x double> undef, <vscale x 16 x i1> %pg, <vscale x 4 x i32> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_u32,_x,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_u64_z(svbool_t pg, svuint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_u64_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i64(<vscale x 8 x half> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_u64,_z,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_u64_z(svbool_t pg, svuint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_u64_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.f32i64(<vscale x 4 x float> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_u64,_z,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_u64_z(svbool_t pg, svuint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_u64_z
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_u64,_z,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_u64_m(svfloat16_t inactive, svbool_t pg, svuint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_u64_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i64(<vscale x 8 x half> %inactive, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_u64,_m,)(inactive, pg, op);
+}
+
+svfloat32_t test_svcvt_f32_u64_m(svfloat32_t inactive, svbool_t pg, svuint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_u64_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.f32i64(<vscale x 4 x float> %inactive, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_u64,_m,)(inactive, pg, op);
+}
+
+svfloat64_t test_svcvt_f64_u64_m(svfloat64_t inactive, svbool_t pg, svuint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_u64_m
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64(<vscale x 2 x double> %inactive, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_u64,_m,)(inactive, pg, op);
+}
+
+svfloat16_t test_svcvt_f16_u64_x(svbool_t pg, svuint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_u64_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i64(<vscale x 8 x half> undef, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_u64,_x,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_u64_x(svbool_t pg, svuint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_u64_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.f32i64(<vscale x 4 x float> undef, <vscale x 16 x i1> %pg, <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_u64,_x,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_u64_x(svbool_t pg, svuint64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_u64_x
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64(<vscale x 2 x double> undef, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_u64,_x,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_f16_z(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_f16_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f16(<vscale x 4 x float> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_f16,_z,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_f16_z(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_f16_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f16(<vscale x 2 x double> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_f16,_z,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_f16_m(svfloat32_t inactive, svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_f16_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f16(<vscale x 4 x float> %inactive, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_f16,_m,)(inactive, pg, op);
+}
+
+svfloat64_t test_svcvt_f64_f16_m(svfloat64_t inactive, svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_f16_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f16(<vscale x 2 x double> %inactive, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_f16,_m,)(inactive, pg, op);
+}
+
+svfloat32_t test_svcvt_f32_f16_x(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_f16_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f16(<vscale x 4 x float> undef, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_f16,_x,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_f16_x(svbool_t pg, svfloat16_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_f16_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f16(<vscale x 2 x double> undef, <vscale x 16 x i1> %pg, <vscale x 8 x half> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_f16,_x,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_f32_z(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_f32_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f32(<vscale x 2 x double> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_f32,_z,)(pg, op);
+}
+
+svfloat64_t test_svcvt_f64_f32_m(svfloat64_t inactive, svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_f32_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f32(<vscale x 2 x double> %inactive, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_f32,_m,)(inactive, pg, op);
+}
+
+svfloat64_t test_svcvt_f64_f32_x(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f64_f32_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f32(<vscale x 2 x double> undef, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f64,_f32,_x,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_f32_z(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_f32_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_f32,_z,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_f64_z(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_f64_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f64(<vscale x 8 x half> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_f64,_z,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_f32_m(svfloat16_t inactive, svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_f32_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half> %inactive, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_f32,_m,)(inactive, pg, op);
+}
+
+svfloat16_t test_svcvt_f16_f64_m(svfloat16_t inactive, svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_f64_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f64(<vscale x 8 x half> %inactive, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_f64,_m,)(inactive, pg, op);
+}
+
+svfloat16_t test_svcvt_f16_f32_x(svbool_t pg, svfloat32_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_f32_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half> undef, <vscale x 16 x i1> %pg, <vscale x 4 x float> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_f32,_x,)(pg, op);
+}
+
+svfloat16_t test_svcvt_f16_f64_x(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f16_f64_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f64(<vscale x 8 x half> undef, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f16,_f64,_x,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_f64_z(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_f64_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f64(<vscale x 4 x float> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_f64,_z,)(pg, op);
+}
+
+svfloat32_t test_svcvt_f32_f64_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_f64_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f64(<vscale x 4 x float> %inactive, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_f64,_m,)(inactive, pg, op);
+}
+
+svfloat32_t test_svcvt_f32_f64_x(svbool_t pg, svfloat64_t op)
+{
+  // CHECK-LABEL: test_svcvt_f32_f64_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f64(<vscale x 4 x float> undef, <vscale x 16 x i1> %pg, <vscale x 2 x double> %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svcvt_f32,_f64,_x,)(pg, op);
+}
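
Note on the tests above: they are built both with and without SVE_OVERLOADED_FORMS defined, so the SVE_ACLE_FUNC macro exercises both ACLE spellings of every conversion. A minimal sketch of the two spellings (not part of the patch; the function names are illustrative):

  #include <arm_sve.h>

  svfloat64_t widen_explicit(svbool_t pg, svfloat32_t op) {
    return svcvt_f64_f32_x(pg, op); // fully-typed spelling
  }

  svfloat64_t widen_overloaded(svbool_t pg, svfloat32_t op) {
    return svcvt_f64_x(pg, op);     // overloaded spelling; types inferred
  }
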
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -7628,6 +7628,9 @@
   if (TypeFlags.isOverloadWhileRW())
     return {getSVEPredType(TypeFlags), Ops[0]->getType()};
 
+  if (TypeFlags.isOverloadCvt())
+    return {Ops[0]->getType(), Ops.back()->getType()};
+
   assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
   return {DefaultType};
 }
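
For the conversion builtins, the overload pair is taken from the result (Ops[0]) and the source vector (the last operand). A sketch of what that implies for one of the IsOverloadCvt builtins added below; the function name is illustrative and the IR in the comment is the expected mangling rather than verified output:

  #include <arm_sve.h>

  // Expected to lower to something like:
  //   @llvm.aarch64.sve.fcvtzs.nxv4i32.nxv4f32(<vscale x 4 x i32> %inactive,
  //       <vscale x 4 x i1> %pg, <vscale x 4 x float> %op)
  svint32_t to_s32(svint32_t inactive, svbool_t pg, svfloat32_t op) {
    return svcvt_s32_f32_m(inactive, pg, op);
  }
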
@@ -7678,8 +7681,18 @@
     // Predicates must match the main datatype.
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
       if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
-        if (PredTy->getElementType()->isIntegerTy(1))
-          Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
+        if (PredTy->getElementType()->isIntegerTy(1)) {
+          // The IsFPConvert special case exists because the predicates of
+          // the ACLE IR intrinsics for FP conversions are always of type
+          // <vscale x 16 x i1>. It will be removed in a follow-up patch
+          // that updates the FP conversion intrinsics to take predicates
+          // matching the default type.
+          llvm::VectorType *NewPredTy =
+              TypeFlags.isFPConvert()
+                  ? llvm::VectorType::get(Builder.getInt1Ty(), {16, true})
+                  : getSVEType(TypeFlags);
+          Ops[i] = EmitSVEPredicateCast(Ops[i], NewPredTy);
+        }
 
     // Splat scalar operand to vector (intrinsics with _n infix)
     if (TypeFlags.hasSplatOperand()) {
Index: clang/include/clang/Basic/arm_sve.td
===================================================================
--- clang/include/clang/Basic/arm_sve.td
+++ clang/include/clang/Basic/arm_sve.td
@@ -169,7 +169,9 @@
 def IsOverloadNone            : FlagType<0x00100000>; // Intrinsic does not take any overloaded types.
 def IsOverloadWhile           : FlagType<0x00200000>; // Use {default type, typeof(operand1)} as overloaded types.
 def IsOverloadWhileRW         : FlagType<0x00400000>; // Use {pred(default type), typeof(operand0)} as overloaded types.
+def IsOverloadCvt             : FlagType<0x00800000>; // Use {typeof(operand0), typeof(last operand)} as overloaded types.
 def OverloadKindMask          : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as overload type.
+def IsFPConvert               : FlagType<0x01000000>; // Predicates of the IR intrinsic are fixed at <vscale x 16 x i1> (temporary, see CGBuiltin.cpp).
 
 // These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
 class ImmCheckType<int val> {
@@ -394,6 +396,102 @@
 def SVMLA_LANE  : SInst<"svmla_lane[_{d}]",  "ddddi",  "hfd", MergeNone, "aarch64_sve_fmla_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
 def SVCMLA_LANE : SInst<"svcmla_lane[_{d}]", "ddddii", "hf",  MergeNone, "aarch64_sve_fcmla_lane", [], [ImmCheck<3, ImmCheckLaneIndexCompRotate, 2>,
                                                                                                         ImmCheck<4, ImmCheckComplexRotAll90>]>;
+////////////////////////////////////////////////////////////////////////////////
+// Floating-point conversions
+
+multiclass SInstCvtMXZ<
+    string name, string m_types, string xz_types, string types,
+    string intrinsic, list<FlagType> flags = [IsFPConvert, IsOverloadNone]> {
+  def _M : SInst<name, m_types,  types, MergeOp1,     intrinsic, flags>;
+  def _X : SInst<name, xz_types, types, MergeAnyExp,  intrinsic, flags>;
+  def _Z : SInst<name, xz_types, types, MergeZeroExp, intrinsic, flags>;
+}
+
+multiclass SInstCvtMX<string name, string m_types, string xz_types,
+                      string types, string intrinsic,
+                      list<FlagType> flags = [IsFPConvert, IsOverloadNone]> {
+  def _M : SInst<name, m_types,  types, MergeOp1,     intrinsic, flags>;
+  def _X : SInst<name, xz_types, types, MergeAnyExp,  intrinsic, flags>;
+}
+
+// svcvt_s##_f16
+defm SVFCVTZS_S16_F16 : SInstCvtMXZ<"svcvt_s16[_f16]", "ddPO", "dPO", "s",  "aarch64_sve_fcvtzs", [IsOverloadCvt]>;
+defm SVFCVTZS_S32_F16 : SInstCvtMXZ<"svcvt_s32[_f16]", "ddPO", "dPO", "i",  "aarch64_sve_fcvtzs_i32f16">;
+defm SVFCVTZS_S64_F16 : SInstCvtMXZ<"svcvt_s64[_f16]", "ddPO", "dPO", "l",  "aarch64_sve_fcvtzs_i64f16">;
+
+// svcvt_s##_f32
+defm SVFCVTZS_S32_F32 : SInstCvtMXZ<"svcvt_s32[_f32]", "ddPM", "dPM", "i",  "aarch64_sve_fcvtzs", [IsOverloadCvt]>;
+defm SVFCVTZS_S64_F32 : SInstCvtMXZ<"svcvt_s64[_f32]", "ddPM", "dPM", "l",  "aarch64_sve_fcvtzs_i64f32">;
+
+// svcvt_s##_f64
+defm SVFCVTZS_S32_F64 : SInstCvtMXZ<"svcvt_s32[_f64]", "ddPN", "dPN", "i",  "aarch64_sve_fcvtzs_i32f64">;
+defm SVFCVTZS_S64_F64 : SInstCvtMXZ<"svcvt_s64[_f64]", "ddPN", "dPN", "l",  "aarch64_sve_fcvtzs", [IsOverloadCvt]>;
+
+// svcvt_u##_f16
+defm SVFCVTZU_U16_F16 : SInstCvtMXZ<"svcvt_u16[_f16]", "ddPO", "dPO", "Us", "aarch64_sve_fcvtzu", [IsOverloadCvt]>;
+defm SVFCVTZU_U32_F16 : SInstCvtMXZ<"svcvt_u32[_f16]", "ddPO", "dPO", "Ui", "aarch64_sve_fcvtzu_i32f16">;
+defm SVFCVTZU_U64_F16 : SInstCvtMXZ<"svcvt_u64[_f16]", "ddPO", "dPO", "Ul", "aarch64_sve_fcvtzu_i64f16">;
+
+// svcvt_u##_f32
+defm SVFCVTZU_U32_F32 : SInstCvtMXZ<"svcvt_u32[_f32]", "ddPM", "dPM", "Ui", "aarch64_sve_fcvtzu", [IsOverloadCvt]>;
+defm SVFCVTZU_U64_F32 : SInstCvtMXZ<"svcvt_u64[_f32]", "ddPM", "dPM", "Ul", "aarch64_sve_fcvtzu_i64f32">;
+
+// svcvt_u##_f64
+defm SVFCVTZU_U32_F64 : SInstCvtMXZ<"svcvt_u32[_f64]", "ddPN", "dPN", "Ui", "aarch64_sve_fcvtzu_i32f64">;
+defm SVFCVTZU_U64_F64 : SInstCvtMXZ<"svcvt_u64[_f64]", "ddPN", "dPN", "Ul", "aarch64_sve_fcvtzu", [IsOverloadCvt]>;
+
+// svcvt_f16_s##
+defm SVFCVTS_F16_S16  : SInstCvtMXZ<"svcvt_f16[_s16]", "OOPd", "OPd", "s",  "aarch64_sve_scvtf", [IsOverloadCvt]>;
+defm SVFCVTS_F16_S32  : SInstCvtMXZ<"svcvt_f16[_s32]", "OOPd", "OPd", "i",  "aarch64_sve_scvtf_f16i32">;
+defm SVFCVTS_F16_S64  : SInstCvtMXZ<"svcvt_f16[_s64]", "OOPd", "OPd", "l",  "aarch64_sve_scvtf_f16i64">;
+
+// svcvt_f32_s##
+defm SVFCVTS_F32_S32  : SInstCvtMXZ<"svcvt_f32[_s32]", "MMPd", "MPd", "i",  "aarch64_sve_scvtf", [IsOverloadCvt]>;
+defm SVFCVTS_F32_S64  : SInstCvtMXZ<"svcvt_f32[_s64]", "MMPd", "MPd", "l",  "aarch64_sve_scvtf_f32i64">;
+
+// svcvt_f64_s##
+defm SVFCVTS_F64_S32  : SInstCvtMXZ<"svcvt_f64[_s32]", "NNPd", "NPd", "i",  "aarch64_sve_scvtf_f64i32">;
+defm SVFCVTS_F64_S64  : SInstCvtMXZ<"svcvt_f64[_s64]", "NNPd", "NPd", "l",  "aarch64_sve_scvtf", [IsOverloadCvt]>;
+
+// svcvt_f16_u##
+defm SVFCVTU_F16_U16  : SInstCvtMXZ<"svcvt_f16[_u16]", "OOPd", "OPd", "Us", "aarch64_sve_ucvtf", [IsOverloadCvt]>;
+defm SVFCVTU_F16_U32  : SInstCvtMXZ<"svcvt_f16[_u32]", "OOPd", "OPd", "Ui", "aarch64_sve_ucvtf_f16i32">;
+defm SVFCVTU_F16_U64  : SInstCvtMXZ<"svcvt_f16[_u64]", "OOPd", "OPd", "Ul", "aarch64_sve_ucvtf_f16i64">;
+
+// svcvt_f32_u##
+defm SVFCVTU_F32_U32  : SInstCvtMXZ<"svcvt_f32[_u32]", "MMPd", "MPd", "Ui", "aarch64_sve_ucvtf", [IsOverloadCvt]>;
+defm SVFCVTU_F32_U64  : SInstCvtMXZ<"svcvt_f32[_u64]", "MMPd", "MPd", "Ul", "aarch64_sve_ucvtf_f32i64">;
+
+// svcvt_f64_u##
+defm SVFCVTU_F64_U32  : SInstCvtMXZ<"svcvt_f64[_u32]", "NNPd", "NPd", "Ui", "aarch64_sve_ucvtf_f64i32">;
+defm SVFCVTU_F64_U64  : SInstCvtMXZ<"svcvt_f64[_u64]", "NNPd", "NPd", "Ul", "aarch64_sve_ucvtf", [IsOverloadCvt]>;
+
+// svcvt_f16_f##
+defm SVFCVT_F16_F32   : SInstCvtMXZ<"svcvt_f16[_f32]", "OOPd", "OPd", "f", "aarch64_sve_fcvt_f16f32">;
+defm SVFCVT_F16_F64   : SInstCvtMXZ<"svcvt_f16[_f64]", "OOPd", "OPd", "d", "aarch64_sve_fcvt_f16f64">;
+
+// svcvt_f32_f##
+defm SVFCVT_F32_F16   : SInstCvtMXZ<"svcvt_f32[_f16]", "MMPd", "MPd", "h", "aarch64_sve_fcvt_f32f16">;
+defm SVFCVT_F32_F64   : SInstCvtMXZ<"svcvt_f32[_f64]", "MMPd", "MPd", "d", "aarch64_sve_fcvt_f32f64">;
+
+// svcvt_f64_f##
+defm SVFCVT_F64_F16   : SInstCvtMXZ<"svcvt_f64[_f16]", "NNPd", "NPd", "h", "aarch64_sve_fcvt_f64f16">;
+defm SVFCVT_F64_F32   : SInstCvtMXZ<"svcvt_f64[_f32]", "NNPd", "NPd", "f", "aarch64_sve_fcvt_f64f32">;
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+defm SVCVTLT_F32    : SInstCvtMX<"svcvtlt_f32[_f16]",  "ddPh", "dPh", "f", "aarch64_sve_fcvtlt_f32f16">;
+defm SVCVTLT_F64    : SInstCvtMX<"svcvtlt_f64[_f32]",  "ddPh", "dPh", "d", "aarch64_sve_fcvtlt_f64f32">;
+
+defm SVCVTX_F32     : SInstCvtMXZ<"svcvtx_f32[_f64]",  "MMPd", "MPd", "d", "aarch64_sve_fcvtx_f32f64">;
+
+def SVCVTNT_F32     : SInst<"svcvtnt_f16[_f32]",  "hhPd", "f", MergeOp1, "aarch64_sve_fcvtnt_f16f32">;
+def SVCVTNT_F64     : SInst<"svcvtnt_f32[_f64]",  "hhPd", "d", MergeOp1, "aarch64_sve_fcvtnt_f32f64">;
+//  SVCVTNT_X       : Implemented as a macro by SveEmitter.cpp
+
+def SVCVTXNT_F32    : SInst<"svcvtxnt_f32[_f64]", "MMPd", "d", MergeOp1, "aarch64_sve_fcvtxnt_f32f64">;
+//  SVCVTXNT_X_F32  : Implemented as a macro by SveEmitter.cpp
+
+}
 
 def SVCADD_M : SInst<"svcadd[_{d}]", "dPddi",  "hfd", MergeOp1,  "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
 def SVCMLA_M : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeOp1,  "aarch64_sve_fcmla", [], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
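
A usage sketch for the _x aliases that SveEmitter.cpp emits as macros (assuming an SVE2-enabled toolchain; the function name is illustrative):

  #include <arm_sve.h>

  #if defined(__ARM_FEATURE_SVE2)
  // FCVTNT only exists in a merging form (the even elements always come from
  // the first operand), so the _x spelling expands to the _m builtin.
  svfloat16_t narrow_top(svfloat16_t even, svbool_t pg, svfloat32_t op) {
    return svcvtnt_f16_f32_x(even, pg, op); // same as svcvtnt_f16_f32_m
  }
  #endif
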
Index: clang/include/clang/Basic/TargetBuiltins.h
===================================================================
--- clang/include/clang/Basic/TargetBuiltins.h
+++ clang/include/clang/Basic/TargetBuiltins.h
@@ -236,6 +236,8 @@
     bool isOverloadWhile() const { return Flags & IsOverloadWhile; }
     bool isOverloadDefault() const { return !(Flags & OverloadKindMask); }
     bool isOverloadWhileRW() const { return Flags & IsOverloadWhileRW; }
+    bool isOverloadCvt() const { return Flags & IsOverloadCvt; }
+    bool isFPConvert() const { return Flags & IsFPConvert; }
 
     uint64_t getBits() const { return Flags; }
     bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }
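
The new flag values line up with the existing mask as follows; a self-contained sketch (not part of the patch) using the constants from arm_sve.td above:

  #include <assert.h>

  #define IsOverloadWhile   0x00200000ULL
  #define IsOverloadWhileRW 0x00400000ULL
  #define IsOverloadCvt     0x00800000ULL
  #define OverloadKindMask  0x00E00000ULL
  #define IsFPConvert       0x01000000ULL

  int main(void) {
    // IsOverloadCvt takes the last free bit covered by OverloadKindMask, so
    // setting it stops isOverloadDefault() from firing.
    assert((IsOverloadCvt & OverloadKindMask) == IsOverloadCvt);
    // IsFPConvert sits outside the mask: it marks a property of the IR
    // intrinsic and leaves the overload kind unchanged.
    assert((IsFPConvert & OverloadKindMask) == 0);
    return 0;
  }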