This revision was landed with ongoing or failed builds.
This revision was automatically updated to reflect the committed changes.
Closed by commit rG2ed7db0d206b: [InstSimplify] Remove redundant {insert,extract}_vector intrinsic chains (authored by joechrisellis).
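
In short, instsimplify now folds the two symmetric round-trip chains below: an extract_vector of an insert_vector, and an insert_vector of an extract_vector. Both indices must be zero, the fixed-width subvector type must match the intrinsic's return type exactly, and (for the insert fold) the destination vector must be undef or the original value itself. A minimal sketch in LLVM IR (value names are illustrative; the intrinsic signatures are taken from the tests in this diff):

  ; (extract_vector (insert_vector undef, X, 0), 0) -> X
  %vec = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> %x, i64 0)
  %sub = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %vec, i64 0)
  ; %sub simplifies to %x

  ; (insert_vector undef, (extract_vector Y, 0), 0) -> Y
  %lo  = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %y, i64 0)
  %res = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> %lo, i64 0)
  ; %res simplifies to %y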

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D101986/new/

https://reviews.llvm.org/D101986

Files:
  clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
  clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
  llvm/lib/Analysis/InstructionSimplify.cpp
  llvm/test/Transforms/InstSimplify/extract-vector.ll
  llvm/test/Transforms/InstSimplify/insert-vector.ll

Index: llvm/test/Transforms/InstSimplify/insert-vector.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/InstSimplify/insert-vector.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+define <vscale x 16 x i8> @redundant_extract_insert_chain(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %x) {
+; CHECK-LABEL: @redundant_extract_insert_chain(
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[X:%.*]]
+;
+  %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8> %x, i64 0)
+  %inserted = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> %extracted, i64 0)
+  ret <vscale x 16 x i8> %inserted
+}
+
+define <vscale x 16 x i8> @non_redundant_extract_insert_chain(<vscale x 16 x i1> %pg, <vscale x 32 x i8> %x) {
+; CHECK-LABEL: @non_redundant_extract_insert_chain(
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8> [[X:%.*]], i64 0)
+; CHECK-NEXT:    [[INSERTED:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> [[EXTRACTED]], i64 0)
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[INSERTED]]
+;
+  %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8> %x, i64 0)
+  %inserted = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> %extracted, i64 0)
+  ret <vscale x 16 x i8> %inserted
+}
+
+declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8>, i64)
+declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8>, i64)
+declare <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8>, <32 x i8>, i64)
Index: llvm/test/Transforms/InstSimplify/extract-vector.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/InstSimplify/extract-vector.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+define <16 x i8> @redundant_insert_extract_chain(<16 x i8> %x) {
+; CHECK-LABEL: @redundant_insert_extract_chain(
+; CHECK-NEXT:    ret <16 x i8> [[X:%.*]]
+;
+  %inserted = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> %x, i64 0)
+  %extracted = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8> %inserted, i64 0)
+  ret <16 x i8> %extracted
+}
+
+define <8 x i8> @non_redundant_insert_extract_chain(<16 x i8> %x) {
+; CHECK-LABEL: @non_redundant_insert_extract_chain(
+; CHECK-NEXT:    [[INSERTED:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> [[X:%.*]], i64 0)
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8> [[INSERTED]], i64 0)
+; CHECK-NEXT:    ret <8 x i8> [[EXTRACTED]]
+;
+  %inserted = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> %x, i64 0)
+  %extracted = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8> %inserted, i64 0)
+  ret <8 x i8> %extracted
+}
+
+declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8>, i64)
+declare <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8>, i64)
+declare <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8>, <16 x i8>, i64)
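
Note the guard that keeps the second test above alive: the extract reads back only <8 x i8>, a strict prefix of the inserted <16 x i8>, so the chain computes the low half of %x rather than %x itself. In effect it is equivalent to this hand-written shuffle (a sketch, not part of the patch):

  %low_half = shufflevector <16 x i8> %x, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>

This is why the code in InstructionSimplify.cpp below insists that X->getType() equals the return type of the outer intrinsic.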
Index: llvm/lib/Analysis/InstructionSimplify.cpp
===================================================================
--- llvm/lib/Analysis/InstructionSimplify.cpp
+++ llvm/lib/Analysis/InstructionSimplify.cpp
@@ -5625,6 +5625,19 @@
 
     break;
   }
+  case Intrinsic::experimental_vector_extract: {
+    Type *ReturnType = F->getReturnType();
+
+    // (extract_vector (insert_vector _, X, 0), 0) -> X
+    unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
+    Value *X = nullptr;
+    if (match(Op0, m_Intrinsic<Intrinsic::experimental_vector_insert>(
+                       m_Value(), m_Value(X), m_Zero())) &&
+        IdxN == 0 && X->getType() == ReturnType)
+      return X;
+
+    break;
+  }
   default:
     break;
   }
@@ -5720,6 +5733,24 @@
 
     return nullptr;
   }
+  case Intrinsic::experimental_vector_insert: {
+    Value *Vec = Call->getArgOperand(0);
+    Value *SubVec = Call->getArgOperand(1);
+    Value *Idx = Call->getArgOperand(2);
+    Type *ReturnType = F->getReturnType();
+
+    // (insert_vector Y, (extract_vector X, 0), 0) -> X
+    // where: Y is X, or Y is undef
+    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
+    Value *X = nullptr;
+    if (match(SubVec, m_Intrinsic<Intrinsic::experimental_vector_extract>(
+                          m_Value(X), m_Zero())) &&
+        (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
+        X->getType() == ReturnType)
+      return X;
+
+    return nullptr;
+  }
   default:
     return nullptr;
   }
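
Applied repeatedly, the two folds collapse whole round-trip chains, which is what shrinks the clang CodeGen tests below. A sketch of the cascade, based on the fixed_caller test (value names abbreviated; each step fires under the conditions above):

  %x = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %x.coerce, i64 0)
  %a = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> %x, i64 0)
  ; insert-of-extract: %a simplifies to %x.coerce ...
  %b = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %a, i64 0)
  %c = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> %b, i64 0)
  ; ... so %b becomes extract(%x.coerce, 0), %c simplifies to %x.coerce as well,
  ; and the function returns %x.coerce directly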
Index: clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
===================================================================
--- clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
+++ clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
@@ -13,9 +13,7 @@
 
 // CHECK-LABEL: @to_svint32_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TYPE_COERCE:%.*]]
 //
 svint32_t to_svint32_t(fixed_int32_t type) {
   return type;
@@ -23,9 +21,7 @@
 
 // CHECK-LABEL: @from_svint32_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TYPE:%.*]]
 //
 fixed_int32_t from_svint32_t(svint32_t type) {
   return type;
@@ -33,9 +29,7 @@
 
 // CHECK-LABEL: @to_svfloat64_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TYPE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TYPE_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[TYPE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TYPE_COERCE:%.*]]
 //
 svfloat64_t to_svfloat64_t(fixed_float64_t type) {
   return type;
@@ -43,9 +37,7 @@
 
 // CHECK-LABEL: @from_svfloat64_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TYPE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TYPE:%.*]]
 //
 fixed_float64_t from_svfloat64_t(svfloat64_t type) {
   return type;
@@ -57,10 +49,10 @@
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <8 x i8>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[TYPE]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 16, [[TBAA6:!tbaa !.*]]
-// CHECK-NEXT:    store <8 x i8> [[TYPE1]], <8 x i8>* [[TYPE_ADDR]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 16, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-NEXT:    store <8 x i8> [[TYPE1]], <8 x i8>* [[TYPE_ADDR]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
 //
 svbool_t to_svbool_t(fixed_bool_t type) {
@@ -71,9 +63,9 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[TYPE_ADDR]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
 // CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -85,7 +77,7 @@
 
 // CHECK-LABEL: @to_svint32_t__from_gnu_int32_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
@@ -96,7 +88,7 @@
 // CHECK-LABEL: @from_svint32_t__to_gnu_int32_t(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE:%.*]], i64 0)
-// CHECK-NEXT:    store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT:    store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    ret void
 //
 gnu_int32_t from_svint32_t__to_gnu_int32_t(svint32_t type) {
@@ -105,7 +97,7 @@
 
 // CHECK-LABEL: @to_fixed_int32_t__from_gnu_int32_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
@@ -116,7 +108,7 @@
 // CHECK-LABEL: @from_fixed_int32_t__to_gnu_int32_t(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT:    store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    ret void
 //
 gnu_int32_t from_fixed_int32_t__to_gnu_int32_t(fixed_int32_t type) {
Index: clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
===================================================================
--- clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
+++ clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
@@ -24,11 +24,7 @@
 
 // CHECK-LABEL: @fixed_caller(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[X:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[X]], i64 0)
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[CASTSCALABLESVE]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE1]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[X_COERCE:%.*]]
 //
 fixed_int32_t fixed_caller(fixed_int32_t x) {
   return sizeless_callee(x);
@@ -36,9 +32,7 @@
 
 // CHECK-LABEL: @fixed_callee(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[X:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[X]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[X_COERCE:%.*]]
 //
 fixed_int32_t fixed_callee(fixed_int32_t x) {
   return x;
@@ -47,12 +41,9 @@
 // CHECK-LABEL: @sizeless_caller(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[COERCE1:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    [[CALL:%.*]] = call <vscale x 4 x i32> @fixed_callee(<vscale x 4 x i32> [[CASTSCALABLESVE]])
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[COERCE1]] to <vscale x 4 x i32>*
-// CHECK-NEXT:    store <vscale x 4 x i32> [[CALL]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-NEXT:    store <vscale x 4 x i32> [[X:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, !tbaa [[TBAA6:![0-9]+]]
 // CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP1]], i64 0)
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE2]]
 //
@@ -66,15 +57,9 @@
 
 // CHECK-LABEL: @call_int32_ff(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[OP1:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP1_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[OP2:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP2_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP2]], i64 0)
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[CASTSCALABLESVE2]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32> [[OP2_COERCE:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 fixed_int32_t call_int32_ff(svbool_t pg, fixed_int32_t op1, fixed_int32_t op2) {
   return svsel(pg, op1, op2);
@@ -82,15 +67,9 @@
 
 // CHECK-LABEL: @call_float64_ff(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[OP1:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP1_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[OP2:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP2_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP2]], i64 0)
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[CASTSCALABLESVE2]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE3:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double> [[OP2_COERCE:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_t op2) {
   return svsel(pg, op1, op2);
@@ -106,20 +85,20 @@
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[OP2]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP1]], align 16
-// CHECK-NEXT:    [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, [[TBAA6]]
-// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
-// CHECK-NEXT:    store <8 x i8> [[OP22]], <8 x i8>* [[OP2_ADDR]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <8 x i8> [[OP22]], <8 x i8>* [[OP2_ADDR]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP2]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP2]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP4]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP4]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
 // CHECK-NEXT:    store <8 x i8> [[TMP7]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP8:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -135,13 +114,9 @@
 
 // CHECK-LABEL: @call_int32_fs(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[OP1:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP1_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP1]], i64 0)
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[OP2:%.*]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE1]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 fixed_int32_t call_int32_fs(svbool_t pg, fixed_int32_t op1, svint32_t op2) {
   return svsel(pg, op1, op2);
@@ -149,13 +124,9 @@
 
 // CHECK-LABEL: @call_float64_fs(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[OP1:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP1_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP1]], i64 0)
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[OP2:%.*]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE1:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE1]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double> [[OP2:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op2) {
   return svsel(pg, op1, op2);
@@ -169,14 +140,14 @@
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
 // CHECK-NEXT:    store <8 x i8> [[TMP4]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -194,9 +165,7 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 fixed_int32_t call_int32_ss(svbool_t pg, svint32_t op1, svint32_t op2) {
   return svsel(pg, op1, op2);
@@ -206,9 +175,7 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 fixed_float64_t call_float64_ss(svbool_t pg, svfloat64_t op1, svfloat64_t op2) {
   return svsel(pg, op1, op2);
@@ -219,9 +186,9 @@
 // CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
 // CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16