https://github.com/amilendra created https://github.com/llvm/llvm-project/pull/187527

Add the following new clang intrinsics based on the ACLE specification
https://github.com/ARM-software/acle/pull/428 (Add alpha support for 9.7 data processing intrinsics). A short usage sketch follows the lists below.

- ADDQP (Add pairwise within quadword vector segments)
  - svint8_t svaddqp_s8(svint8_t, svint8_t) / svint8_t svaddqp(svint8_t, svint8_t)
  - svuint8_t svaddqp_u8(svuint8_t, svuint8_t) / svuint8_t svaddqp(svuint8_t, svuint8_t)
  - svint16_t svaddqp_s16(svint16_t, svint16_t) / svint16_t svaddqp(svint16_t, svint16_t)
  - svuint16_t svaddqp_u16(svuint16_t, svuint16_t) / svuint16_t svaddqp(svuint16_t, svuint16_t)
  - svint32_t svaddqp_s32(svint32_t, svint32_t) / svint32_t svaddqp(svint32_t, svint32_t)
  - svuint32_t svaddqp_u32(svuint32_t, svuint32_t) / svuint32_t svaddqp(svuint32_t, svuint32_t)
  - svint64_t svaddqp_s64(svint64_t, svint64_t) / svint64_t svaddqp(svint64_t, svint64_t)
  - svuint64_t svaddqp_u64(svuint64_t, svuint64_t) / svuint64_t svaddqp(svuint64_t, svuint64_t)

- ADDSUBP (Add and subtract pairwise)
  - svint8_t svaddsubp_s8(svint8_t, svint8_t) / svint8_t svaddsubp(svint8_t, svint8_t)
  - svuint8_t svaddsubp_u8(svuint8_t, svuint8_t) / svuint8_t svaddsubp(svuint8_t, svuint8_t)
  - svint16_t svaddsubp_s16(svint16_t, svint16_t) / svint16_t svaddsubp(svint16_t, svint16_t)
  - svuint16_t svaddsubp_u16(svuint16_t, svuint16_t) / svuint16_t svaddsubp(svuint16_t, svuint16_t)
  - svint32_t svaddsubp_s32(svint32_t, svint32_t) / svint32_t svaddsubp(svint32_t, svint32_t)
  - svuint32_t svaddsubp_u32(svuint32_t, svuint32_t) / svuint32_t svaddsubp(svuint32_t, svuint32_t)
  - svint64_t svaddsubp_s64(svint64_t, svint64_t) / svint64_t svaddsubp(svint64_t, svint64_t)
  - svuint64_t svaddsubp_u64(svuint64_t, svuint64_t) / svuint64_t svaddsubp(svuint64_t, svuint64_t)

- SUBP (Subtract pairwise)
  - svint8_t svsubp_s8(svbool_t, svint8_t, svint8_t) / svint8_t svsubp(svbool_t, svint8_t, svint8_t)
  - svuint8_t svsubp_u8(svbool_t, svuint8_t, svuint8_t) / svuint8_t svsubp(svbool_t, svuint8_t, svuint8_t)
  - svint16_t svsubp_s16(svbool_t, svint16_t, svint16_t) / svint16_t svsubp(svbool_t, svint16_t, svint16_t)
  - svuint16_t svsubp_u16(svbool_t, svuint16_t, svuint16_t) / svuint16_t svsubp(svbool_t, svuint16_t, svuint16_t)
  - svint32_t svsubp_s32(svbool_t, svint32_t, svint32_t) / svint32_t svsubp(svbool_t, svint32_t, svint32_t)
  - svuint32_t svsubp_u32(svbool_t, svuint32_t, svuint32_t) / svuint32_t svsubp(svbool_t, svuint32_t, svuint32_t)
  - svint64_t svsubp_s64(svbool_t, svint64_t, svint64_t) / svint64_t svsubp(svbool_t, svint64_t, svint64_t)
  - svuint64_t svsubp_u64(svbool_t, svuint64_t, svuint64_t) / svuint64_t svsubp(svbool_t, svuint64_t, svuint64_t)
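
For reference, a minimal usage sketch of the new intrinsics (not part of this patch). It assumes a compiler with this change applied and the `sve2p3` (or `sme2p3`) target feature enabled, as exercised by the RUN lines in the tests below; `pairwise_demo` is an illustrative name, and the comments only restate the operation descriptions above:

```c
#include <arm_sve.h>

svint32_t pairwise_demo(svbool_t pg, svint32_t zn, svint32_t zm) {
  // Add pairwise within quadword (128-bit) vector segments; the type-generic
  // overload svaddqp(zn, zm) resolves to the same builtin.
  svint32_t sums = svaddqp_s32(zn, zm);
  // Add and subtract pairwise (unpredicated, like svaddqp).
  svint32_t mixed = svaddsubp_s32(zn, zm);
  // Subtract pairwise, governed by the predicate pg.
  return svsubp_s32(pg, sums, mixed);
}
```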

>From 228896570d2180120c5f88b36be9fbd73b62b749 Mon Sep 17 00:00:00 2001
From: Amilendra Kodithuwakku <[email protected]>
Date: Thu, 19 Mar 2026 15:33:30 +0000
Subject: [PATCH] [Clang][AArch64][SVE2p3][SME2p3] Add intrinsics for v9.7a
 add/add-and-subtract/subtract pairwise operations

Add the following new clang intrinsics based on the ACLE specification
https://github.com/ARM-software/acle/pull/428 (Add alpha support for 9.7 data processing intrinsics)

- ADDQP (Add pairwise within quadword vector segments)
  - svint8_t svaddqp_s8(svint8_t, svint8_t) / svint8_t svaddqp(svint8_t, svint8_t)
  - svuint8_t svaddqp_u8(svuint8_t, svuint8_t) / svuint8_t svaddqp(svuint8_t, svuint8_t)
  - svint16_t svaddqp_s16(svint16_t, svint16_t) / svint16_t svaddqp(svint16_t, svint16_t)
  - svuint16_t svaddqp_u16(svuint16_t, svuint16_t) / svuint16_t svaddqp(svuint16_t, svuint16_t)
  - svint32_t svaddqp_s32(svint32_t, svint32_t) / svint32_t svaddqp(svint32_t, svint32_t)
  - svuint32_t svaddqp_u32(svuint32_t, svuint32_t) / svuint32_t svaddqp(svuint32_t, svuint32_t)
  - svint64_t svaddqp_s64(svint64_t, svint64_t) / svint64_t svaddqp(svint64_t, svint64_t)
  - svuint64_t svaddqp_u64(svuint64_t, svuint64_t) / svuint64_t svaddqp(svuint64_t, svuint64_t)

- ADDSUBP (Add and subtract pairwise)
  - svint8_t svaddsubp_s8(svint8_t, svint8_t) / svint8_t svaddsubp(svint8_t, svint8_t)
  - svuint8_t svaddsubp_u8(svuint8_t, svuint8_t) / svuint8_t svaddsubp(svuint8_t, svuint8_t)
  - svint16_t svaddsubp_s16(svint16_t, svint16_t) / svint16_t svaddsubp(svint16_t, svint16_t)
  - svuint16_t svaddsubp_u16(svuint16_t, svuint16_t) / svuint16_t svaddsubp(svuint16_t, svuint16_t)
  - svint32_t svaddsubp_s32(svint32_t, svint32_t) / svint32_t svaddsubp(svint32_t, svint32_t)
  - svuint32_t svaddsubp_u32(svuint32_t, svuint32_t) / svuint32_t svaddsubp(svuint32_t, svuint32_t)
  - svint64_t svaddsubp_s64(svint64_t, svint64_t) / svint64_t svaddsubp(svint64_t, svint64_t)
  - svuint64_t svaddsubp_u64(svuint64_t, svuint64_t) / svuint64_t svaddsubp(svuint64_t, svuint64_t)

- SUBP (Subtract pairwise)
  - svint8_t svsubp_s8(svbool_t, svint8_t, svint8_t) / svint8_t svsubp(svbool_t, svint8_t, svint8_t)
  - svuint8_t svsubp_u8(svbool_t, svuint8_t, svuint8_t) / svuint8_t svsubp(svbool_t, svuint8_t, svuint8_t)
  - svint16_t svsubp_s16(svbool_t, svint16_t, svint16_t) / svint16_t svsubp(svbool_t, svint16_t, svint16_t)
  - svuint16_t svsubp_u16(svbool_t, svuint16_t, svuint16_t) / svuint16_t svsubp(svbool_t, svuint16_t, svuint16_t)
  - svint32_t svsubp_s32(svbool_t, svint32_t, svint32_t) / svint32_t svsubp(svbool_t, svint32_t, svint32_t)
  - svuint32_t svsubp_u32(svbool_t, svuint32_t, svuint32_t) / svuint32_t svsubp(svbool_t, svuint32_t, svuint32_t)
  - svint64_t svsubp_s64(svbool_t, svint64_t, svint64_t) / svint64_t svsubp(svbool_t, svint64_t, svint64_t)
  - svuint64_t svsubp_u64(svbool_t, svuint64_t, svuint64_t) / svuint64_t svsubp(svbool_t, svuint64_t, svuint64_t)
---
 clang/include/clang/Basic/arm_sve.td          |  11 +
 .../sve2p3-intrinsics/acle_sve2p3_addqp.c     | 262 ++++++++++++++
 .../sve2p3-intrinsics/acle_sve2p3_addsubp.c   | 262 ++++++++++++++
 .../sve2p3-intrinsics/acle_sve2p3_subp.c      | 322 ++++++++++++++++++
 ...e2p3_RP___sme_AND_LP_sve2p3_OR_sme2p3_RP.c | 193 +++++++++++
 llvm/include/llvm/IR/IntrinsicsAArch64.td     |   4 +
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td |   6 +-
 7 files changed, 1057 insertions(+), 3 deletions(-)
 create mode 100644 clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_addqp.c
 create mode 100644 clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_addsubp.c
 create mode 100644 clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_subp.c
 create mode 100644 clang/test/Sema/AArch64/arm_sve_feature_dependent_sve_AND_LP_sve2p3_OR_sme2p3_RP___sme_AND_LP_sve2p3_OR_sme2p3_RP.c

diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index be3cd8a76503b..5bc48c7bde799 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -1421,6 +1421,17 @@ defm SVMINP_S : SInstPairwise<"svminp",   "csli",         "aarch64_sve_sminp", [
 defm SVMINP_U : SInstPairwise<"svminp",   "UcUsUiUl",     "aarch64_sve_uminp", [VerifyRuntimeMode]>;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+// SVE2.3 - Add pairwise within quadword vector segments
+
+let SVETargetGuard = "sve2p3|sme2p3", SMETargetGuard = "sve2p3|sme2p3" in {
+def SVADDQP   : SInst<"svaddqp[_{d}]", "ddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_addqp",
+                    [VerifyRuntimeMode]>;
+def SVADDSUBP : SInst<"svaddsubp[_{d}]", "ddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_addsubp",
+                    [VerifyRuntimeMode]>;
+def SVSUBP    : SInst<"svsubp[_{d}]", "dPdd", "csilUcUsUiUl", MergeNone, "aarch64_sve_subp", [VerifyRuntimeMode]>;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // SVE2 - Widening pairwise arithmetic
 
diff --git a/clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_addqp.c b/clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_addqp.c
new file mode 100644
index 0000000000000..50eb8515f04e1
--- /dev/null
+++ b/clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_addqp.c
@@ -0,0 +1,262 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme                       -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme                       -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+// REQUIRES: aarch64-registered-target
+
+#include <arm_sve.h>
+
+#if defined(__ARM_FEATURE_SME) && defined(__ARM_FEATURE_SVE)
+#define ATTR __arm_streaming_compatible
+#elif defined(__ARM_FEATURE_SME)
+#define ATTR __arm_streaming
+#else
+#define ATTR
+#endif
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED) A1
+#else
+#define SVE_ACLE_FUNC(A1,A2) A1##A2
+#endif
+
+// CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_svaddqp_s8(
+// CHECK-SAME: <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]]) 
#[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.addqp.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> 
[[TMP1]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 16 x i8> 
@_Z15test_svaddqp_s8u10__SVInt8_tS_(
+// CPP-CHECK-SAME: <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> 
[[ZM:%.*]]) #[[ATTR0:[0-9]+]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.addqp.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> 
[[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
+//
+svint8_t test_svaddqp_s8(svint8_t zn, svint8_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddqp,_s8)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_svaddqp_u8(
+// CHECK-SAME: <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.addqp.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> 
[[TMP1]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 16 x i8> 
@_Z15test_svaddqp_u8u11__SVUint8_tS_(
+// CPP-CHECK-SAME: <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.addqp.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> 
[[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
+//
+svuint8_t test_svaddqp_u8(svuint8_t zn, svuint8_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddqp,_u8)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 8 x i16> @test_svaddqp_s16(
+// CHECK-SAME: <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.addqp.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> 
[[TMP1]])
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 8 x i16> 
@_Z16test_svaddqp_s16u11__SVInt16_tS_(
+// CPP-CHECK-SAME: <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.addqp.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> 
[[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
+//
+svint16_t test_svaddqp_s16(svint16_t zn, svint16_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddqp,_s16)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 8 x i16> @test_svaddqp_u16(
+// CHECK-SAME: <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.addqp.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> 
[[TMP1]])
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 8 x i16> 
@_Z16test_svaddqp_u16u12__SVUint16_tS_(
+// CPP-CHECK-SAME: <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.addqp.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> 
[[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
+//
+svuint16_t test_svaddqp_u16(svuint16_t zn, svuint16_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddqp,_u16)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 4 x i32> @test_svaddqp_s32(
+// CHECK-SAME: <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.addqp.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> 
[[TMP1]])
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 4 x i32> 
@_Z16test_svaddqp_s32u11__SVInt32_tS_(
+// CPP-CHECK-SAME: <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.addqp.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> 
[[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
+//
+svint32_t test_svaddqp_s32(svint32_t zn, svint32_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddqp,_s32)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 4 x i32> @test_svaddqp_u32(
+// CHECK-SAME: <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.addqp.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> 
[[TMP1]])
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 4 x i32> 
@_Z16test_svaddqp_u32u12__SVUint32_tS_(
+// CPP-CHECK-SAME: <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.addqp.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> 
[[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
+//
+svuint32_t test_svaddqp_u32(svuint32_t zn, svuint32_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddqp,_u32)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 2 x i64> @test_svaddqp_s64(
+// CHECK-SAME: <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.addqp.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> 
[[TMP1]])
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 2 x i64> 
@_Z16test_svaddqp_s64u11__SVInt64_tS_(
+// CPP-CHECK-SAME: <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.addqp.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> 
[[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
+//
+svint64_t test_svaddqp_s64(svint64_t zn, svint64_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddqp,_s64)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 2 x i64> @test_svaddqp_u64(
+// CHECK-SAME: <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.addqp.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> 
[[TMP1]])
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 2 x i64> 
@_Z16test_svaddqp_u64u12__SVUint64_tS_(
+// CPP-CHECK-SAME: <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.addqp.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> 
[[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
+//
+svuint64_t test_svaddqp_u64(svuint64_t zn, svuint64_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddqp,_u64)(zn, zm);
+}
diff --git a/clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_addsubp.c b/clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_addsubp.c
new file mode 100644
index 0000000000000..afea0a51cb910
--- /dev/null
+++ b/clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_addsubp.c
@@ -0,0 +1,262 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme                       -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme                       -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+// REQUIRES: aarch64-registered-target
+
+#include <arm_sve.h>
+
+#if defined(__ARM_FEATURE_SME) && defined(__ARM_FEATURE_SVE)
+#define ATTR __arm_streaming_compatible
+#elif defined(__ARM_FEATURE_SME)
+#define ATTR __arm_streaming
+#else
+#define ATTR
+#endif
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED) A1
+#else
+#define SVE_ACLE_FUNC(A1,A2) A1##A2
+#endif
+
+// CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_addsubp_s8(
+// CHECK-SAME: <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]]) 
#[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.addsubp.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x 
i8> [[TMP1]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 16 x i8> 
@_Z15test_addsubp_s8u10__SVInt8_tS_(
+// CPP-CHECK-SAME: <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> 
[[ZM:%.*]]) #[[ATTR0:[0-9]+]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.addsubp.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x 
i8> [[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
+//
+svint8_t test_addsubp_s8(svint8_t zn, svint8_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddsubp,_s8)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_addsubp_u8(
+// CHECK-SAME: <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.addsubp.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x 
i8> [[TMP1]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 16 x i8> 
@_Z15test_addsubp_u8u11__SVUint8_tS_(
+// CPP-CHECK-SAME: <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.addsubp.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x 
i8> [[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
+//
+svuint8_t test_addsubp_u8(svuint8_t zn, svuint8_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddsubp,_u8)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 8 x i16> @test_addsubp_s16(
+// CHECK-SAME: <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.addsubp.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x 
i16> [[TMP1]])
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 8 x i16> 
@_Z16test_addsubp_s16u11__SVInt16_tS_(
+// CPP-CHECK-SAME: <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.addsubp.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x 
i16> [[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
+//
+svint16_t test_addsubp_s16(svint16_t zn, svint16_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddsubp,_s16)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 8 x i16> @test_addsubp_u16(
+// CHECK-SAME: <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.addsubp.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x 
i16> [[TMP1]])
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 8 x i16> 
@_Z16test_addsubp_u16u12__SVUint16_tS_(
+// CPP-CHECK-SAME: <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.addsubp.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x 
i16> [[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
+//
+svuint16_t test_addsubp_u16(svuint16_t zn, svuint16_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddsubp,_u16)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 4 x i32> @test_addsubp_s32(
+// CHECK-SAME: <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.addsubp.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x 
i32> [[TMP1]])
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 4 x i32> 
@_Z16test_addsubp_s32u11__SVInt32_tS_(
+// CPP-CHECK-SAME: <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.addsubp.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x 
i32> [[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
+//
+svint32_t test_addsubp_s32(svint32_t zn, svint32_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddsubp,_s32)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 4 x i32> @test_addsubp_u32(
+// CHECK-SAME: <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.addsubp.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x 
i32> [[TMP1]])
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 4 x i32> 
@_Z16test_addsubp_u32u12__SVUint32_tS_(
+// CPP-CHECK-SAME: <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> 
@llvm.aarch64.sve.addsubp.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x 
i32> [[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
+//
+svuint32_t test_addsubp_u32(svuint32_t zn, svuint32_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddsubp,_u32)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 2 x i64> @test_addsubp_s64(
+// CHECK-SAME: <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.addsubp.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x 
i64> [[TMP1]])
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 2 x i64> 
@_Z16test_addsubp_s64u11__SVInt64_tS_(
+// CPP-CHECK-SAME: <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.addsubp.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x 
i64> [[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
+//
+svint64_t test_addsubp_s64(svint64_t zn, svint64_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddsubp,_s64)(zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 2 x i64> @test_addsubp_u64(
+// CHECK-SAME: <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) 
#[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.addsubp.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x 
i64> [[TMP1]])
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 2 x i64> 
@_Z16test_addsubp_u64u12__SVUint64_tS_(
+// CPP-CHECK-SAME: <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> 
[[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.addsubp.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x 
i64> [[TMP1]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
+//
+svuint64_t test_addsubp_u64(svuint64_t zn, svuint64_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svaddsubp,_u64)(zn, zm);
+}
diff --git a/clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_subp.c b/clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_subp.c
new file mode 100644
index 0000000000000..02e5e44b117b7
--- /dev/null
+++ b/clang/test/CodeGen/AArch64/sve2p3-intrinsics/acle_sve2p3_subp.c
@@ -0,0 +1,322 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme                       -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2 -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve                       -target-feature +sme2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme                       -target-feature +sve2p3 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+// REQUIRES: aarch64-registered-target
+
+#include <arm_sve.h>
+
+#if defined(__ARM_FEATURE_SME) && defined(__ARM_FEATURE_SVE)
+#define ATTR __arm_streaming_compatible
+#elif defined(__ARM_FEATURE_SME)
+#define ATTR __arm_streaming
+#else
+#define ATTR
+#endif
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED) A1
+#else
+#define SVE_ACLE_FUNC(A1,A2) A1##A2
+#endif
+
+// CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_subp_s8(
+// CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[ZN:%.*]], 
<vscale x 16 x i8> [[ZM:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], 
align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.subp.nxv16i8(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i8> 
[[TMP1]], <vscale x 16 x i8> [[TMP2]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP3]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 16 x i8> 
@_Z12test_subp_s8u10__SVBool_tu10__SVInt8_tS0_(
+// CPP-CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> 
[[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]]) #[[ATTR0:[0-9]+]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], 
align 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.subp.nxv16i8(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i8> 
[[TMP1]], <vscale x 16 x i8> [[TMP2]])
+// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP3]]
+//
+svint8_t test_subp_s8(svbool_t pg, svint8_t zn, svint8_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svsubp,_s8)(pg, zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_subp_u8(
+// CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[ZN:%.*]], 
<vscale x 16 x i8> [[ZM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], 
align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.subp.nxv16i8(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i8> 
[[TMP1]], <vscale x 16 x i8> [[TMP2]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP3]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 16 x i8> 
@_Z12test_subp_u8u10__SVBool_tu11__SVUint8_tS0_(
+// CPP-CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> 
[[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i8> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], 
align 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i8>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i8>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i8> 
@llvm.aarch64.sve.subp.nxv16i8(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i8> 
[[TMP1]], <vscale x 16 x i8> [[TMP2]])
+// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP3]]
+//
+svuint8_t test_subp_u8(svbool_t pg, svuint8_t zn, svuint8_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svsubp,_u8)(pg, zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 8 x i16> @test_subp_s16(
+// CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 8 x i16> [[ZN:%.*]], 
<vscale x 8 x i16> [[ZM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], 
align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], 
align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[TMP0]])
+// CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.subp.nxv8i16(<vscale x 8 x i1> [[TMP3]], <vscale x 8 x i16> 
[[TMP1]], <vscale x 8 x i16> [[TMP2]])
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP4]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 8 x i16> 
@_Z13test_subp_s16u10__SVBool_tu11__SVInt16_tS0_(
+// CPP-CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 8 x i16> 
[[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 
16
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 
16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], 
align 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], 
align 16
+// CPP-CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i1> 
@llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[TMP0]])
+// CPP-CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x i16> 
@llvm.aarch64.sve.subp.nxv8i16(<vscale x 8 x i1> [[TMP3]], <vscale x 8 x i16> 
[[TMP1]], <vscale x 8 x i16> [[TMP2]])
+// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP4]]
+//
+svint16_t test_subp_s16(svbool_t pg, svint16_t zn, svint16_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svsubp,_s16)(pg, zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 8 x i16> @test_subp_u16(
+// CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[TMP0]])
+// CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subp.nxv8i16(<vscale x 8 x i1> [[TMP3]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]])
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP4]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 8 x i16> @_Z13test_subp_u16u10__SVBool_tu12__SVUint16_tS0_(
+// CPP-CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CPP-CHECK-NEXT:    store <vscale x 8 x i16> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i16>, ptr [[ZN_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 8 x i16>, ptr [[ZM_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[TMP0]])
+// CPP-CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subp.nxv8i16(<vscale x 8 x i1> [[TMP3]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]])
+// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP4]]
+//
+svuint16_t test_subp_u16(svbool_t pg, svuint16_t zn, svuint16_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svsubp,_u16)(pg, zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 4 x i32> @test_subp_s32(
+// CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP0]])
+// CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subp.nxv4i32(<vscale x 4 x i1> [[TMP3]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]])
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP4]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 4 x i32> @_Z13test_subp_s32u10__SVBool_tu11__SVInt32_tS0_(
+// CPP-CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP0]])
+// CPP-CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subp.nxv4i32(<vscale x 4 x i1> [[TMP3]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP4]]
+//
+svint32_t test_subp_s32(svbool_t pg, svint32_t zn, svint32_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svsubp,_s32)(pg, zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 4 x i32> @test_subp_u32(
+// CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP0]])
+// CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subp.nxv4i32(<vscale x 4 x i1> [[TMP3]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]])
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP4]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 4 x i32> @_Z13test_subp_u32u10__SVBool_tu12__SVUint32_tS0_(
+// CPP-CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CPP-CHECK-NEXT:    store <vscale x 4 x i32> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[ZN_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, ptr [[ZM_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP0]])
+// CPP-CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subp.nxv4i32(<vscale x 4 x i1> [[TMP3]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]])
+// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP4]]
+//
+svuint32_t test_subp_u32(svbool_t pg, svuint32_t zn, svuint32_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svsubp,_u32)(pg, zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 2 x i64> @test_subp_s64(
+// CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP0]])
+// CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subp.nxv2i64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]])
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP4]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 2 x i64> @_Z13test_subp_s64u10__SVBool_tu11__SVInt64_tS0_(
+// CPP-CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP0]])
+// CPP-CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subp.nxv2i64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP4]]
+//
+svint64_t test_subp_s64(svbool_t pg, svint64_t zn, svint64_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svsubp,_s64)(pg, zn, zm);
+}
+
+// CHECK-LABEL: define dso_local <vscale x 2 x i64> @test_subp_u64(
+// CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP0]])
+// CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subp.nxv2i64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]])
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP4]]
+//
+// CPP-CHECK-LABEL: define dso_local <vscale x 2 x i64> @_Z13test_subp_u64u10__SVBool_tu12__SVUint64_tS0_(
+// CPP-CHECK-SAME: <vscale x 16 x i1> [[PG:%.*]], <vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT:  [[ENTRY:.*:]]
+// CPP-CHECK-NEXT:    [[PG_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CPP-CHECK-NEXT:    [[ZN_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    [[ZM_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CPP-CHECK-NEXT:    store <vscale x 16 x i1> [[PG]], ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZN]], ptr [[ZN_ADDR]], align 16
+// CPP-CHECK-NEXT:    store <vscale x 2 x i64> [[ZM]], ptr [[ZM_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PG_ADDR]], align 2
+// CPP-CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i64>, ptr [[ZN_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x i64>, ptr [[ZM_ADDR]], align 16
+// CPP-CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP0]])
+// CPP-CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subp.nxv2i64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]])
+// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP4]]
+//
+svuint64_t test_subp_u64(svbool_t pg, svuint64_t zn, svuint64_t zm) ATTR
+{
+  return SVE_ACLE_FUNC(svsubp,_u64)(pg, zn, zm);
+}
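
For orientation: at -O0 every variant above lowers the same way. The arguments
are spilled and reloaded, the <vscale x 16 x i1> predicate is narrowed to the
element width, and the new intrinsic is called. A minimal hand-written caller
for illustration (the function name pairwise_diff is not part of the patch,
and it assumes a toolchain with the +sve2p3 or +sme2p3 target feature):

  #include <arm_sve.h>

  // Narrows pg to <vscale x 8 x i1> and calls
  // @llvm.aarch64.sve.subp.nxv8i16, which selects to SUBP_ZPmZZ
  // (see the AArch64SVEInstrInfo.td change below).
  svint16_t pairwise_diff(svbool_t pg, svint16_t zn, svint16_t zm) {
    return svsubp_s16(pg, zn, zm);
  }
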
diff --git a/clang/test/Sema/AArch64/arm_sve_feature_dependent_sve_AND_LP_sve2p3_OR_sme2p3_RP___sme_AND_LP_sve2p3_OR_sme2p3_RP.c b/clang/test/Sema/AArch64/arm_sve_feature_dependent_sve_AND_LP_sve2p3_OR_sme2p3_RP___sme_AND_LP_sve2p3_OR_sme2p3_RP.c
new file mode 100644
index 0000000000000..8753407609d09
--- /dev/null
+++ b/clang/test/Sema/AArch64/arm_sve_feature_dependent_sve_AND_LP_sve2p3_OR_sme2p3_RP___sme_AND_LP_sve2p3_OR_sme2p3_RP.c
@@ -0,0 +1,193 @@
+// NOTE: File has been autogenerated by utils/aarch64_builtins_test_generator.py
+// RUN: %clang_cc1 %s -fsyntax-only -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sve -target-feature +sve2p3 -verify
+// RUN: %clang_cc1 %s -fsyntax-only -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme2p3 -target-feature +sve -verify
+// expected-no-diagnostics
+
+// REQUIRES: aarch64-registered-target
+
+#include <arm_sve.h>
+
+// Properties: guard="sve,(sve2p3|sme2p3)" streaming_guard="sme,(sve2p3|sme2p3)" flags="feature-dependent"
+
+void test(void) {
+  svbool_t svbool_t_val;
+  svint8_t svint8_t_val;
+  svint16_t svint16_t_val;
+  svint32_t svint32_t_val;
+  svint64_t svint64_t_val;
+  svuint8_t svuint8_t_val;
+  svuint16_t svuint16_t_val;
+  svuint32_t svuint32_t_val;
+  svuint64_t svuint64_t_val;
+
+  svaddqp(svint8_t_val, svint8_t_val);
+  svaddqp(svint16_t_val, svint16_t_val);
+  svaddqp(svint32_t_val, svint32_t_val);
+  svaddqp(svint64_t_val, svint64_t_val);
+  svaddqp(svuint8_t_val, svuint8_t_val);
+  svaddqp(svuint16_t_val, svuint16_t_val);
+  svaddqp(svuint32_t_val, svuint32_t_val);
+  svaddqp(svuint64_t_val, svuint64_t_val);
+  svaddqp_s8(svint8_t_val, svint8_t_val);
+  svaddqp_s16(svint16_t_val, svint16_t_val);
+  svaddqp_s32(svint32_t_val, svint32_t_val);
+  svaddqp_s64(svint64_t_val, svint64_t_val);
+  svaddqp_u8(svuint8_t_val, svuint8_t_val);
+  svaddqp_u16(svuint16_t_val, svuint16_t_val);
+  svaddqp_u32(svuint32_t_val, svuint32_t_val);
+  svaddqp_u64(svuint64_t_val, svuint64_t_val);
+  svaddsubp(svint8_t_val, svint8_t_val);
+  svaddsubp(svint16_t_val, svint16_t_val);
+  svaddsubp(svint32_t_val, svint32_t_val);
+  svaddsubp(svint64_t_val, svint64_t_val);
+  svaddsubp(svuint8_t_val, svuint8_t_val);
+  svaddsubp(svuint16_t_val, svuint16_t_val);
+  svaddsubp(svuint32_t_val, svuint32_t_val);
+  svaddsubp(svuint64_t_val, svuint64_t_val);
+  svaddsubp_s8(svint8_t_val, svint8_t_val);
+  svaddsubp_s16(svint16_t_val, svint16_t_val);
+  svaddsubp_s32(svint32_t_val, svint32_t_val);
+  svaddsubp_s64(svint64_t_val, svint64_t_val);
+  svaddsubp_u8(svuint8_t_val, svuint8_t_val);
+  svaddsubp_u16(svuint16_t_val, svuint16_t_val);
+  svaddsubp_u32(svuint32_t_val, svuint32_t_val);
+  svaddsubp_u64(svuint64_t_val, svuint64_t_val);
+  svsubp(svbool_t_val, svint8_t_val, svint8_t_val);
+  svsubp(svbool_t_val, svint16_t_val, svint16_t_val);
+  svsubp(svbool_t_val, svint32_t_val, svint32_t_val);
+  svsubp(svbool_t_val, svint64_t_val, svint64_t_val);
+  svsubp(svbool_t_val, svuint8_t_val, svuint8_t_val);
+  svsubp(svbool_t_val, svuint16_t_val, svuint16_t_val);
+  svsubp(svbool_t_val, svuint32_t_val, svuint32_t_val);
+  svsubp(svbool_t_val, svuint64_t_val, svuint64_t_val);
+  svsubp_s8(svbool_t_val, svint8_t_val, svint8_t_val);
+  svsubp_s16(svbool_t_val, svint16_t_val, svint16_t_val);
+  svsubp_s32(svbool_t_val, svint32_t_val, svint32_t_val);
+  svsubp_s64(svbool_t_val, svint64_t_val, svint64_t_val);
+  svsubp_u8(svbool_t_val, svuint8_t_val, svuint8_t_val);
+  svsubp_u16(svbool_t_val, svuint16_t_val, svuint16_t_val);
+  svsubp_u32(svbool_t_val, svuint32_t_val, svuint32_t_val);
+  svsubp_u64(svbool_t_val, svuint64_t_val, svuint64_t_val);
+}
+
+void test_streaming(void) __arm_streaming {
+  svbool_t svbool_t_val;
+  svint8_t svint8_t_val;
+  svint16_t svint16_t_val;
+  svint32_t svint32_t_val;
+  svint64_t svint64_t_val;
+  svuint8_t svuint8_t_val;
+  svuint16_t svuint16_t_val;
+  svuint32_t svuint32_t_val;
+  svuint64_t svuint64_t_val;
+
+  svaddqp(svint8_t_val, svint8_t_val);
+  svaddqp(svint16_t_val, svint16_t_val);
+  svaddqp(svint32_t_val, svint32_t_val);
+  svaddqp(svint64_t_val, svint64_t_val);
+  svaddqp(svuint8_t_val, svuint8_t_val);
+  svaddqp(svuint16_t_val, svuint16_t_val);
+  svaddqp(svuint32_t_val, svuint32_t_val);
+  svaddqp(svuint64_t_val, svuint64_t_val);
+  svaddqp_s8(svint8_t_val, svint8_t_val);
+  svaddqp_s16(svint16_t_val, svint16_t_val);
+  svaddqp_s32(svint32_t_val, svint32_t_val);
+  svaddqp_s64(svint64_t_val, svint64_t_val);
+  svaddqp_u8(svuint8_t_val, svuint8_t_val);
+  svaddqp_u16(svuint16_t_val, svuint16_t_val);
+  svaddqp_u32(svuint32_t_val, svuint32_t_val);
+  svaddqp_u64(svuint64_t_val, svuint64_t_val);
+  svaddsubp(svint8_t_val, svint8_t_val);
+  svaddsubp(svint16_t_val, svint16_t_val);
+  svaddsubp(svint32_t_val, svint32_t_val);
+  svaddsubp(svint64_t_val, svint64_t_val);
+  svaddsubp(svuint8_t_val, svuint8_t_val);
+  svaddsubp(svuint16_t_val, svuint16_t_val);
+  svaddsubp(svuint32_t_val, svuint32_t_val);
+  svaddsubp(svuint64_t_val, svuint64_t_val);
+  svaddsubp_s8(svint8_t_val, svint8_t_val);
+  svaddsubp_s16(svint16_t_val, svint16_t_val);
+  svaddsubp_s32(svint32_t_val, svint32_t_val);
+  svaddsubp_s64(svint64_t_val, svint64_t_val);
+  svaddsubp_u8(svuint8_t_val, svuint8_t_val);
+  svaddsubp_u16(svuint16_t_val, svuint16_t_val);
+  svaddsubp_u32(svuint32_t_val, svuint32_t_val);
+  svaddsubp_u64(svuint64_t_val, svuint64_t_val);
+  svsubp(svbool_t_val, svint8_t_val, svint8_t_val);
+  svsubp(svbool_t_val, svint16_t_val, svint16_t_val);
+  svsubp(svbool_t_val, svint32_t_val, svint32_t_val);
+  svsubp(svbool_t_val, svint64_t_val, svint64_t_val);
+  svsubp(svbool_t_val, svuint8_t_val, svuint8_t_val);
+  svsubp(svbool_t_val, svuint16_t_val, svuint16_t_val);
+  svsubp(svbool_t_val, svuint32_t_val, svuint32_t_val);
+  svsubp(svbool_t_val, svuint64_t_val, svuint64_t_val);
+  svsubp_s8(svbool_t_val, svint8_t_val, svint8_t_val);
+  svsubp_s16(svbool_t_val, svint16_t_val, svint16_t_val);
+  svsubp_s32(svbool_t_val, svint32_t_val, svint32_t_val);
+  svsubp_s64(svbool_t_val, svint64_t_val, svint64_t_val);
+  svsubp_u8(svbool_t_val, svuint8_t_val, svuint8_t_val);
+  svsubp_u16(svbool_t_val, svuint16_t_val, svuint16_t_val);
+  svsubp_u32(svbool_t_val, svuint32_t_val, svuint32_t_val);
+  svsubp_u64(svbool_t_val, svuint64_t_val, svuint64_t_val);
+}
+
+void test_streaming_compatible(void) __arm_streaming_compatible {
+  svbool_t svbool_t_val;
+  svint8_t svint8_t_val;
+  svint16_t svint16_t_val;
+  svint32_t svint32_t_val;
+  svint64_t svint64_t_val;
+  svuint8_t svuint8_t_val;
+  svuint16_t svuint16_t_val;
+  svuint32_t svuint32_t_val;
+  svuint64_t svuint64_t_val;
+
+  svaddqp(svint8_t_val, svint8_t_val);
+  svaddqp(svint16_t_val, svint16_t_val);
+  svaddqp(svint32_t_val, svint32_t_val);
+  svaddqp(svint64_t_val, svint64_t_val);
+  svaddqp(svuint8_t_val, svuint8_t_val);
+  svaddqp(svuint16_t_val, svuint16_t_val);
+  svaddqp(svuint32_t_val, svuint32_t_val);
+  svaddqp(svuint64_t_val, svuint64_t_val);
+  svaddqp_s8(svint8_t_val, svint8_t_val);
+  svaddqp_s16(svint16_t_val, svint16_t_val);
+  svaddqp_s32(svint32_t_val, svint32_t_val);
+  svaddqp_s64(svint64_t_val, svint64_t_val);
+  svaddqp_u8(svuint8_t_val, svuint8_t_val);
+  svaddqp_u16(svuint16_t_val, svuint16_t_val);
+  svaddqp_u32(svuint32_t_val, svuint32_t_val);
+  svaddqp_u64(svuint64_t_val, svuint64_t_val);
+  svaddsubp(svint8_t_val, svint8_t_val);
+  svaddsubp(svint16_t_val, svint16_t_val);
+  svaddsubp(svint32_t_val, svint32_t_val);
+  svaddsubp(svint64_t_val, svint64_t_val);
+  svaddsubp(svuint8_t_val, svuint8_t_val);
+  svaddsubp(svuint16_t_val, svuint16_t_val);
+  svaddsubp(svuint32_t_val, svuint32_t_val);
+  svaddsubp(svuint64_t_val, svuint64_t_val);
+  svaddsubp_s8(svint8_t_val, svint8_t_val);
+  svaddsubp_s16(svint16_t_val, svint16_t_val);
+  svaddsubp_s32(svint32_t_val, svint32_t_val);
+  svaddsubp_s64(svint64_t_val, svint64_t_val);
+  svaddsubp_u8(svuint8_t_val, svuint8_t_val);
+  svaddsubp_u16(svuint16_t_val, svuint16_t_val);
+  svaddsubp_u32(svuint32_t_val, svuint32_t_val);
+  svaddsubp_u64(svuint64_t_val, svuint64_t_val);
+  svsubp(svbool_t_val, svint8_t_val, svint8_t_val);
+  svsubp(svbool_t_val, svint16_t_val, svint16_t_val);
+  svsubp(svbool_t_val, svint32_t_val, svint32_t_val);
+  svsubp(svbool_t_val, svint64_t_val, svint64_t_val);
+  svsubp(svbool_t_val, svuint8_t_val, svuint8_t_val);
+  svsubp(svbool_t_val, svuint16_t_val, svuint16_t_val);
+  svsubp(svbool_t_val, svuint32_t_val, svuint32_t_val);
+  svsubp(svbool_t_val, svuint64_t_val, svuint64_t_val);
+  svsubp_s8(svbool_t_val, svint8_t_val, svint8_t_val);
+  svsubp_s16(svbool_t_val, svint16_t_val, svint16_t_val);
+  svsubp_s32(svbool_t_val, svint32_t_val, svint32_t_val);
+  svsubp_s64(svbool_t_val, svint64_t_val, svint64_t_val);
+  svsubp_u8(svbool_t_val, svuint8_t_val, svuint8_t_val);
+  svsubp_u16(svbool_t_val, svuint16_t_val, svuint16_t_val);
+  svsubp_u32(svbool_t_val, svuint32_t_val, svuint32_t_val);
+  svsubp_u64(svbool_t_val, svuint64_t_val, svuint64_t_val);
+}
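
The guard/streaming_guard split above means the builtins are accepted in
normal, streaming, and streaming-compatible functions alike, provided the
matching feature pair is enabled. A minimal streaming-compatible wrapper
modeled on the generated test (the wrapper name addsub_pairs is illustrative,
not from the patch):

  #include <arm_sve.h>

  // Legal under both RUN configurations; callable whether or not the
  // caller is in streaming mode.
  svuint32_t addsub_pairs(svuint32_t a, svuint32_t b)
      __arm_streaming_compatible {
    return svaddsubp_u32(a, b);
  }
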
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 75929cbc222ad..04cd99b50e727 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -2564,6 +2564,10 @@ def int_aarch64_sve_sminp   : AdvSIMD_Pred2VectorArg_Intrinsic<[IntrSpeculatable
 def int_aarch64_sve_umaxp   : AdvSIMD_Pred2VectorArg_Intrinsic<[IntrSpeculatable]>;
 def int_aarch64_sve_uminp   : AdvSIMD_Pred2VectorArg_Intrinsic<[IntrSpeculatable]>;
 
+def int_aarch64_sve_addqp   : AdvSIMD_2VectorArg_Intrinsic<[IntrSpeculatable]>;
+def int_aarch64_sve_addsubp : AdvSIMD_2VectorArg_Intrinsic<[IntrSpeculatable]>;
+def int_aarch64_sve_subp    : AdvSIMD_Pred2VectorArg_Intrinsic;
+
 //
 // SVE2 - Widening pairwise arithmetic
 //
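
Note the asymmetry these definitions encode: addqp and addsubp are
unpredicated two-operand intrinsics (and marked IntrSpeculatable), while subp
takes a governing predicate like the existing pairwise min/max intrinsics
above it. The same split shows up in the C signatures; a short sketch, with
the instruction names taken from the AArch64SVEInstrInfo.td change below
(shape_demo is an illustrative name, not from the patch):

  #include <arm_sve.h>

  void shape_demo(svbool_t pg, svint32_t a, svint32_t b) {
    svint32_t r0 = svaddqp_s32(a, b);     // unpredicated: ADDQP_ZZZ
    svint32_t r1 = svaddsubp_s32(a, b);   // unpredicated: ADDSUBP_ZZZ
    svint32_t r2 = svsubp_s32(pg, a, b);  // predicated:   SUBP_ZPmZZ
    (void)r0; (void)r1; (void)r2;
  }
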
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index c5a3bd504adf9..bcb3af6792b5b 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -4793,11 +4793,11 @@ let Predicates = [HasSVE2p2_or_SME2p2] in {
 //===----------------------------------------------------------------------===//
 let Predicates = [HasSVE2p3_or_SME2p3] in {
   // SVE2 Add pairwise within quadword vector segments (unpredicated)
-  defm ADDQP_ZZZ     : sve2_int_mul<0b110, "addqp",   null_frag>;
+  defm ADDQP_ZZZ     : sve2_int_mul<0b110, "addqp",   int_aarch64_sve_addqp>;
 
   // SVE2 Add subtract/subtract pairwise
-  defm ADDSUBP_ZZZ   : sve2_int_mul<0b111, "addsubp", null_frag>;
-  defm SUBP_ZPmZZ    : sve2_int_arith_pred<0b100001, "subp", null_frag>;
+  defm ADDSUBP_ZZZ   : sve2_int_mul<0b111, "addsubp", int_aarch64_sve_addsubp>;
+  defm SUBP_ZPmZZ    : sve2_int_arith_pred<0b100001, "subp", int_aarch64_sve_subp>;
 
   // SVE2 integer absolute difference and accumulate long
   defm SABAL_ZZZ : sve2_int_two_way_absdiff_accum_long<0b0, "sabal">;
