c-rhodes updated this revision to Diff 291891.
c-rhodes edited the summary of this revision.
c-rhodes added a comment.

Added tests for implicit casts between GNU and fixed-length SVE vectors. I was
planning to create a separate patch for these, but since they are tests only,
with no functional changes, I've included them in this patch.

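For reference, a minimal sketch of the kind of implicit cast the new tests
cover (assuming -msve-vector-bits=512, so __ARM_FEATURE_SVE_BITS is defined);
the function names here are illustrative, not taken from the patch:

  #include <stdint.h>

  #define N __ARM_FEATURE_SVE_BITS

  typedef __SVInt8_t svint8_t;
  typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(N)));
  typedef int8_t gnu_int8_t __attribute__((vector_size(N / 8)));

  // GNU <-> VLA and GNU <-> VLS conversions are implicit when the GNU
  // vector's size in bits matches __ARM_FEATURE_SVE_BITS.
  svint8_t gnu_to_sve(gnu_int8_t x) { return x; }
  gnu_int8_t sve_to_gnu(svint8_t x) { return x; }
  fixed_int8_t gnu_to_fixed(gnu_int8_t x) { return x; }
  gnu_int8_t fixed_to_gnu(fixed_int8_t x) { return x; }
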

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D87607/new/

https://reviews.llvm.org/D87607

Files:
  clang/lib/AST/ASTContext.cpp
  clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
  clang/test/Sema/attr-arm-sve-vector-bits.c
  clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp

Index: clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp
===================================================================
--- clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp
+++ clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp
@@ -1,14 +1,26 @@
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fsyntax-only -verify -std=c++11 -msve-vector-bits=512 -fallow-half-arguments-and-returns %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -ffreestanding -fsyntax-only -verify -std=c++11 -msve-vector-bits=512 -fallow-half-arguments-and-returns %s
 // expected-no-diagnostics
 
+#include <stdint.h>
+
 #define N __ARM_FEATURE_SVE_BITS
 
 typedef __SVInt8_t svint8_t;
 typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(N)));
+typedef int8_t gnu_int8_t __attribute__((vector_size(N / 8)));
 
 template<typename T> struct S { T var; };
 
 S<fixed_int8_t> s;
 
+// Test implicit casts between VLA and VLS vectors
 svint8_t to_svint8_t(fixed_int8_t x) { return x; }
 fixed_int8_t from_svint8_t(svint8_t x) { return x; }
+
+// Test implicit casts between GNU and VLA vectors
+svint8_t to_svint8_t__from_gnu_int8_t(gnu_int8_t x) { return x; }
+gnu_int8_t from_svint8_t__to_gnu_int8_t(svint8_t x) { return x; }
+
+// Test implicit casts between GNU and VLS vectors
+fixed_int8_t to_fixed_int8_t__from_gnu_int8_t(gnu_int8_t x) { return x; }
+gnu_int8_t from_fixed_int8_t__to_gnu_int8_t(fixed_int8_t x) { return x; }
Index: clang/test/Sema/attr-arm-sve-vector-bits.c
===================================================================
--- clang/test/Sema/attr-arm-sve-vector-bits.c
+++ clang/test/Sema/attr-arm-sve-vector-bits.c
@@ -1,11 +1,16 @@
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fsyntax-only -verify -msve-vector-bits=128 -fallow-half-arguments-and-returns %s
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fsyntax-only -verify -msve-vector-bits=256 -fallow-half-arguments-and-returns %s
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fsyntax-only -verify -msve-vector-bits=512 -fallow-half-arguments-and-returns %s
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fsyntax-only -verify -msve-vector-bits=1024 -fallow-half-arguments-and-returns %s
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fsyntax-only -verify -msve-vector-bits=2048 -fallow-half-arguments-and-returns %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -ffreestanding -fsyntax-only -verify -msve-vector-bits=128 -fallow-half-arguments-and-returns %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -ffreestanding -fsyntax-only -verify -msve-vector-bits=256 -fallow-half-arguments-and-returns %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -ffreestanding -fsyntax-only -verify -msve-vector-bits=512 -fallow-half-arguments-and-returns %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -ffreestanding -fsyntax-only -verify -msve-vector-bits=1024 -fallow-half-arguments-and-returns %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -ffreestanding -fsyntax-only -verify -msve-vector-bits=2048 -fallow-half-arguments-and-returns %s
+
+#include <stdint.h>
 
 #define N __ARM_FEATURE_SVE_BITS
 
+typedef __fp16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
 typedef __SVInt8_t svint8_t;
 typedef __SVInt16_t svint16_t;
 typedef __SVInt32_t svint32_t;
@@ -19,6 +24,7 @@
 typedef __SVFloat64_t svfloat64_t;
 
 #if defined(__ARM_FEATURE_SVE_BF16)
+typedef __bf16 bfloat16_t;
 typedef __SVBFloat16_t svbfloat16_t;
 #endif
 
@@ -43,6 +49,23 @@
 
 typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
 
+// GNU vector types
+typedef int8_t gnu_int8_t __attribute__((vector_size(N / 8)));
+typedef int16_t gnu_int16_t __attribute__((vector_size(N / 8)));
+typedef int32_t gnu_int32_t __attribute__((vector_size(N / 8)));
+typedef int64_t gnu_int64_t __attribute__((vector_size(N / 8)));
+
+typedef uint8_t gnu_uint8_t __attribute__((vector_size(N / 8)));
+typedef uint16_t gnu_uint16_t __attribute__((vector_size(N / 8)));
+typedef uint32_t gnu_uint32_t __attribute__((vector_size(N / 8)));
+typedef uint64_t gnu_uint64_t __attribute__((vector_size(N / 8)));
+
+typedef float16_t gnu_float16_t __attribute__((vector_size(N / 8)));
+typedef float32_t gnu_float32_t __attribute__((vector_size(N / 8)));
+typedef float64_t gnu_float64_t __attribute__((vector_size(N / 8)));
+
+typedef bfloat16_t gnu_bfloat16_t __attribute__((vector_size(N / 8)));
+
 // Attribute must have a single argument
 typedef svint8_t no_argument __attribute__((arm_sve_vector_bits));         // expected-error {{'arm_sve_vector_bits' attribute takes one argument}}
 typedef svint8_t two_arguments __attribute__((arm_sve_vector_bits(2, 4))); // expected-error {{'arm_sve_vector_bits' attribute takes one argument}}
@@ -176,38 +199,51 @@
 // --------------------------------------------------------------------------//
 // Implicit casts
 
-#define TEST_CAST(TYPE)                                          \
-  sv##TYPE##_t to_sv##TYPE##_t(fixed_##TYPE##_t x) { return x; } \
-  fixed_##TYPE##_t from_sv##TYPE##_t(sv##TYPE##_t x) { return x; }
-
-TEST_CAST(int8)
-TEST_CAST(int16)
-TEST_CAST(int32)
-TEST_CAST(int64)
-TEST_CAST(uint8)
-TEST_CAST(uint16)
-TEST_CAST(uint32)
-TEST_CAST(uint64)
-TEST_CAST(float16)
-TEST_CAST(float32)
-TEST_CAST(float64)
-TEST_CAST(bfloat16)
-TEST_CAST(bool)
+#define TEST_CAST_COMMON(TYPE)                                              \
+  sv##TYPE##_t to_sv##TYPE##_t_from_fixed(fixed_##TYPE##_t x) { return x; } \
+  fixed_##TYPE##_t from_sv##TYPE##_t_to_fixed(sv##TYPE##_t x) { return x; }
+
+#define TEST_CAST_GNU(PREFIX, TYPE)                                                          \
+  gnu_##TYPE##_t to_gnu_##TYPE##_t_from_##PREFIX##TYPE##_t(PREFIX##TYPE##_t x) { return x; } \
+  PREFIX##TYPE##_t from_gnu_##TYPE##_t_to_##PREFIX##TYPE##_t(gnu_##TYPE##_t x) { return x; }
+
+#define TEST_CAST_VECTOR(TYPE) \
+  TEST_CAST_COMMON(TYPE)       \
+  TEST_CAST_GNU(sv, TYPE)      \
+  TEST_CAST_GNU(fixed_, TYPE)  \
+
+TEST_CAST_VECTOR(int8)
+TEST_CAST_VECTOR(int16)
+TEST_CAST_VECTOR(int32)
+TEST_CAST_VECTOR(int64)
+TEST_CAST_VECTOR(uint8)
+TEST_CAST_VECTOR(uint16)
+TEST_CAST_VECTOR(uint32)
+TEST_CAST_VECTOR(uint64)
+TEST_CAST_VECTOR(float16)
+TEST_CAST_VECTOR(float32)
+TEST_CAST_VECTOR(float64)
+TEST_CAST_VECTOR(bfloat16)
+TEST_CAST_COMMON(bool)
 
 // Test the implicit conversion only applies to valid types
 fixed_int8_t to_fixed_int8_t__from_svuint8_t(svuint8_t x) { return x; } // expected-error-re {{returning 'svuint8_t' (aka '__SVUint8_t') from a function with incompatible result type 'fixed_int8_t' (vector of {{[0-9]+}} 'signed char' values)}}
 fixed_bool_t to_fixed_bool_t__from_svint32_t(svint32_t x) { return x; } // expected-error-re {{returning 'svint32_t' (aka '__SVInt32_t') from a function with incompatible result type 'fixed_bool_t' (vector of {{[0-9]+}} 'unsigned char' values)}}
 
+svint64_t to_svint64_t__from_gnu_int32_t(gnu_int32_t x) { return x; } // expected-error-re {{returning 'gnu_int32_t' (vector of {{[0-9]+}} 'int32_t' values) from a function with incompatible result type 'svint64_t' (aka '__SVInt64_t')}}
+gnu_int32_t from_svint64_t__to_gnu_int32_t(svint64_t x) { return x; } // expected-error-re {{returning 'svint64_t' (aka '__SVInt64_t') from a function with incompatible result type 'gnu_int32_t' (vector of {{[0-9]+}} 'int32_t' values)}}
+
+// Test implicit conversion between SVE and GNU vector is invalid when
+// __ARM_FEATURE_SVE_BITS != N
+#if defined(__ARM_FEATURE_SVE_BITS) && __ARM_FEATURE_SVE_BITS == 512
+typedef int32_t int4 __attribute__((vector_size(16)));
+svint64_t badcast(int4 x) { return x; } // expected-error-re {{returning 'int4' (vector of {{[0-9]+}} 'int32_t' values) from a function with incompatible result type 'svint64_t' (aka '__SVInt64_t')}}
+#endif
+
 // Test conversion between predicate and uint8 is invalid, both have the same
 // memory representation.
 fixed_bool_t to_fixed_bool_t__from_svuint8_t(svuint8_t x) { return x; } // expected-error-re {{returning 'svuint8_t' (aka '__SVUint8_t') from a function with incompatible result type 'fixed_bool_t' (vector of {{[0-9]+}} 'unsigned char' values)}}
 
-// Test the implicit conversion only applies to fixed-length types
-typedef signed int vSInt32 __attribute__((__vector_size__(16)));
-svint32_t to_svint32_t_from_gnut(vSInt32 x) { return x; } // expected-error-re {{returning 'vSInt32' (vector of {{[0-9]+}} 'int' values) from a function with incompatible result type 'svint32_t' (aka '__SVInt32_t')}}
-
-vSInt32 to_gnut_from_svint32_t(svint32_t x) { return x; } // expected-error-re {{returning 'svint32_t' (aka '__SVInt32_t') from a function with incompatible result type 'vSInt32' (vector of {{[0-9]+}} 'int' values)}}
-
 // --------------------------------------------------------------------------//
 // Test the scalable and fixed-length types can be used interchangeably
 
Index: clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
===================================================================
--- clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
+++ clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
@@ -9,6 +9,7 @@
 typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
 typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(N)));
 typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
+typedef int32_t gnu_int32_t __attribute__((vector_size(N / 8)));
 
 // CHECK-LABEL: @to_svint32_t(
 // CHECK-NEXT:  entry:
@@ -16,10 +17,10 @@
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[TYPE]] to <vscale x 4 x i32>*
 // CHECK-NEXT:    store <vscale x 4 x i32> [[TYPE_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT:    [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, !tbaa !2
-// CHECK-NEXT:    store <16 x i32> [[TYPE1]], <16 x i32>* [[TYPE_ADDR]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, [[TBAA2:!tbaa !.*]]
+// CHECK-NEXT:    store <16 x i32> [[TYPE1]], <16 x i32>* [[TYPE_ADDR]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i32>* [[TYPE_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
 //
 svint32_t to_svint32_t(fixed_int32_t type) {
@@ -30,9 +31,9 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT:    store <vscale x 4 x i32> [[TYPE:%.*]], <vscale x 4 x i32>* [[TYPE_ADDR]], align 16, !tbaa !5
+// CHECK-NEXT:    store <vscale x 4 x i32> [[TYPE:%.*]], <vscale x 4 x i32>* [[TYPE_ADDR]], align 16, [[TBAA5:!tbaa !.*]]
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32>* [[TYPE_ADDR]] to <16 x i32>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT:    store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -48,10 +49,10 @@
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <8 x double>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x double>* [[TYPE]] to <vscale x 2 x double>*
 // CHECK-NEXT:    store <vscale x 2 x double> [[TYPE_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x double>, <8 x double>* [[TYPE]], align 16, !tbaa !2
-// CHECK-NEXT:    store <8 x double> [[TYPE1]], <8 x double>* [[TYPE_ADDR]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x double>, <8 x double>* [[TYPE]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <8 x double> [[TYPE1]], <8 x double>* [[TYPE_ADDR]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x double>* [[TYPE_ADDR]] to <vscale x 2 x double>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
 //
 svfloat64_t to_svfloat64_t(fixed_float64_t type) {
@@ -62,9 +63,9 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <vscale x 2 x double>, align 16
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-NEXT:    store <vscale x 2 x double> [[TYPE:%.*]], <vscale x 2 x double>* [[TYPE_ADDR]], align 16, !tbaa !7
+// CHECK-NEXT:    store <vscale x 2 x double> [[TYPE:%.*]], <vscale x 2 x double>* [[TYPE_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double>* [[TYPE_ADDR]] to <8 x double>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[TMP0]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
 // CHECK-NEXT:    store <8 x double> [[TMP1]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
@@ -81,11 +82,11 @@
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[TYPE]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[TYPE]] to i64*
-// CHECK-NEXT:    [[TYPE12:%.*]] = load i64, i64* [[TMP1]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TYPE12:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to i64*
-// CHECK-NEXT:    store i64 [[TYPE12]], i64* [[TMP2]], align 16, !tbaa !2
+// CHECK-NEXT:    store i64 [[TYPE12]], i64* [[TMP2]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP3]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP3]], align 16, [[TBAA2]]
 // CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP4]]
 //
 svbool_t to_svbool_t(fixed_bool_t type) {
@@ -96,9 +97,9 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, !tbaa !9
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[TYPE_ADDR]] to i64*
-// CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, !tbaa !2
+// CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, [[TBAA2]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i64*
 // CHECK-NEXT:    store i64 [[TMP1]], i64* [[TMP2]], align 16
 // CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -107,3 +108,55 @@
 fixed_bool_t from_svbool_t(svbool_t type) {
   return type;
 }
+
+// CHECK-LABEL: @to_svint32_t__from_gnu_int32_t(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <16 x i32>, align 16
+// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <16 x i32> [[TYPE]], <16 x i32>* [[TYPE_ADDR]], align 16, [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i32>* [[TYPE_ADDR]] to <vscale x 4 x i32>*
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA2]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
+//
+svint32_t to_svint32_t__from_gnu_int32_t(gnu_int32_t type) {
+  return type;
+}
+
+// CHECK-LABEL: @from_svint32_t__to_gnu_int32_t(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[TYPE:%.*]], <vscale x 4 x i32>* [[TYPE_ADDR]], align 16, [[TBAA5]]
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32>* [[TYPE_ADDR]] to <16 x i32>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <16 x i32> [[TMP1]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+gnu_int32_t from_svint32_t__to_gnu_int32_t(svint32_t type) {
+  return type;
+}
+
+// CHECK-LABEL: @to_fixed_int32_t__from_gnu_int32_t(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA2]]
+// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
+// CHECK-NEXT:    store <16 x i32> [[TYPE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+fixed_int32_t to_fixed_int32_t__from_gnu_int32_t(gnu_int32_t type) {
+  return type;
+}
+
+// CHECK-LABEL: @from_fixed_int32_t__to_gnu_int32_t(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TYPE:%.*]] = alloca <16 x i32>, align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[TYPE]] to <vscale x 4 x i32>*
+// CHECK-NEXT:    store <vscale x 4 x i32> [[TYPE_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
+// CHECK-NEXT:    [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, [[TBAA2]]
+// CHECK-NEXT:    store <16 x i32> [[TYPE1]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+gnu_int32_t from_fixed_int32_t__to_gnu_int32_t(fixed_int32_t type) {
+  return type;
+}
Index: clang/lib/AST/ASTContext.cpp
===================================================================
--- clang/lib/AST/ASTContext.cpp
+++ clang/lib/AST/ASTContext.cpp
@@ -8516,6 +8516,10 @@
         else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector)
           return VT->getElementType().getCanonicalType() ==
                  FirstType->getSveEltType(*this);
+        else if (VT->getVectorKind() == VectorType::GenericVector &&
+                 getTypeSize(SecondType) == getLangOpts().ArmSveVectorBits)
+          return VT->getElementType().getCanonicalType() ==
+                 getBuiltinVectorTypeInfo(BT).ElementType;
       }
     }
     return false;