This patch splits out some of the qualifier handling from the v1 patch and
adjusts the VREINTERPRET* macros to include support for the mf8 intrinsics.
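
For reference, these reinterpret intrinsics are pure bit-pattern casts
between the mfloat8x8_t/mfloat8x16_t types and the existing vector types.
A minimal illustration of what user code gains (the wrapper function names
below are made up; the intrinsic and type names are the ones exercised by
the new test):

  #include <arm_neon.h>

  /* Illustrative only: reinterpret an mfloat8x16_t as int8x16_t and
     back.  Both directions are no-op bit casts, so no conversion code
     is generated.  */
  int8x16_t
  as_s8 (mfloat8x16_t a)
  {
    return vreinterpretq_s8_mf8 (a);
  }

  mfloat8x16_t
  as_mf8 (int8x16_t a)
  {
    return vreinterpretq_mf8_s8 (a);
  }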

Bootstrapped and regression tested on aarch64; ok for master?

gcc/ChangeLog:

        * config/aarch64/aarch64-builtins.cc (MODE_d_mf8): New.
        (MODE_q_mf8): New.
        (QUAL_mf8): New.
        (VREINTERPRET_BUILTINS1): Add mf8 entry.
        (VREINTERPRET_BUILTINS): Ditto.
        (VREINTERPRETQ_BUILTINS1): Ditto.
        (VREINTERPRETQ_BUILTINS): Ditto.
        (aarch64_lookup_simd_type_in_table): Also match the modal_float bit.

gcc/testsuite/ChangeLog:

        * gcc.target/aarch64/advsimd-intrinsics/mf8-reinterpret.c: New test.


diff --git a/gcc/config/aarch64/aarch64-builtins.cc b/gcc/config/aarch64/aarch64-builtins.cc
index 432131c3b2d7cf4f788b79ce3d84c9e7554dc750..31231c9e66ee8307cb86e181fc51ea2622c5f82c 100644
--- a/gcc/config/aarch64/aarch64-builtins.cc
+++ b/gcc/config/aarch64/aarch64-builtins.cc
@@ -133,6 +133,7 @@
 #define MODE_d_f16 E_V4HFmode
 #define MODE_d_f32 E_V2SFmode
 #define MODE_d_f64 E_V1DFmode
+#define MODE_d_mf8 E_V8QImode
 #define MODE_d_s8 E_V8QImode
 #define MODE_d_s16 E_V4HImode
 #define MODE_d_s32 E_V2SImode
@@ -148,6 +149,7 @@
 #define MODE_q_f16 E_V8HFmode
 #define MODE_q_f32 E_V4SFmode
 #define MODE_q_f64 E_V2DFmode
+#define MODE_q_mf8 E_V16QImode
 #define MODE_q_s8 E_V16QImode
 #define MODE_q_s16 E_V8HImode
 #define MODE_q_s32 E_V4SImode
@@ -177,6 +179,7 @@
 #define QUAL_p16 qualifier_poly
 #define QUAL_p64 qualifier_poly
 #define QUAL_p128 qualifier_poly
+#define QUAL_mf8 qualifier_modal_float
 
 #define LENGTH_d ""
 #define LENGTH_q "q"
@@ -598,6 +601,7 @@ static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
 /* vreinterpret intrinsics are defined for any pair of element types.
    {     _bf16           }   {     _bf16           }
    {      _f16 _f32 _f64 }   {      _f16 _f32 _f64 }
+   { _mf8                }   { _mf8                }
    { _s8  _s16 _s32 _s64 } x { _s8  _s16 _s32 _s64 }
    { _u8  _u16 _u32 _u64 }   { _u8  _u16 _u32 _u64 }
    { _p8  _p16      _p64 }   { _p8  _p16      _p64 }.  */
@@ -609,6 +613,7 @@ static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
   VREINTERPRET_BUILTIN2 (A, f16) \
   VREINTERPRET_BUILTIN2 (A, f32) \
   VREINTERPRET_BUILTIN2 (A, f64) \
+  VREINTERPRET_BUILTIN2 (A, mf8) \
   VREINTERPRET_BUILTIN2 (A, s8) \
   VREINTERPRET_BUILTIN2 (A, s16) \
   VREINTERPRET_BUILTIN2 (A, s32) \
@@ -626,6 +631,7 @@ static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
   VREINTERPRET_BUILTINS1 (f16) \
   VREINTERPRET_BUILTINS1 (f32) \
   VREINTERPRET_BUILTINS1 (f64) \
+  VREINTERPRET_BUILTINS1 (mf8) \
   VREINTERPRET_BUILTINS1 (s8) \
   VREINTERPRET_BUILTINS1 (s16) \
   VREINTERPRET_BUILTINS1 (s32) \
@@ -641,6 +647,7 @@ static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
 /* vreinterpretq intrinsics are additionally defined for p128.
    {     _bf16                 }   {     _bf16                 }
    {      _f16 _f32 _f64       }   {      _f16 _f32 _f64       }
+   { _mf8                      }   { _mf8                      }
    { _s8  _s16 _s32 _s64       } x { _s8  _s16 _s32 _s64       }
    { _u8  _u16 _u32 _u64       }   { _u8  _u16 _u32 _u64       }
    { _p8  _p16      _p64 _p128 }   { _p8  _p16      _p64 _p128 }.  */
@@ -652,6 +659,7 @@ static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
   VREINTERPRETQ_BUILTIN2 (A, f16) \
   VREINTERPRETQ_BUILTIN2 (A, f32) \
   VREINTERPRETQ_BUILTIN2 (A, f64) \
+  VREINTERPRETQ_BUILTIN2 (A, mf8) \
   VREINTERPRETQ_BUILTIN2 (A, s8) \
   VREINTERPRETQ_BUILTIN2 (A, s16) \
   VREINTERPRETQ_BUILTIN2 (A, s32) \
@@ -670,6 +678,7 @@ static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
   VREINTERPRETQ_BUILTINS1 (f16) \
   VREINTERPRETQ_BUILTINS1 (f32) \
   VREINTERPRETQ_BUILTINS1 (f64) \
+  VREINTERPRETQ_BUILTINS1 (mf8) \
   VREINTERPRETQ_BUILTINS1 (s8) \
   VREINTERPRETQ_BUILTINS1 (s16) \
   VREINTERPRETQ_BUILTINS1 (s32) \
@@ -1117,7 +1126,8 @@ aarch64_lookup_simd_type_in_table (machine_mode mode,
 {
   int i;
   int nelts = ARRAY_SIZE (aarch64_simd_types);
-  int q = qualifiers & (qualifier_poly | qualifier_unsigned);
+  int q = qualifiers
+    & (qualifier_poly | qualifier_unsigned | qualifier_modal_float);
 
   for (i = 0; i < nelts; i++)
     {
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/mf8-reinterpret.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/mf8-reinterpret.c
new file mode 100644
index 0000000000000000000000000000000000000000..5e5921746036bbfbf20d2a77697760efd1f71cc2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/mf8-reinterpret.c
@@ -0,0 +1,46 @@
+/* { dg-do compile { target { aarch64*-*-* } } } */
+
+#include <arm_neon.h>
+
+#define TEST_128(T, S)                           \
+T test_vreinterpretq_##S##_mf8 (mfloat8x16_t a)  \
+{                                                \
+  return vreinterpretq_##S##_mf8 (a);            \
+}                                                \
+                                                 \
+mfloat8x16_t test_vreinterpretq_mf8_##S (T a)    \
+{                                                \
+  return vreinterpretq_mf8_##S (a);              \
+}
+
+
+#define TEST_BOTH(T1, T2, S)                     \
+TEST_128(T2, S)                                  \
+T1 test_vreinterpret_##S##_mf8 (mfloat8x8_t a)   \
+{                                                \
+  return vreinterpret_##S##_mf8 (a);             \
+}                                                \
+                                                 \
+mfloat8x8_t test_vreinterpret_mf8_##S (T1 a)     \
+{                                                \
+  return vreinterpret_mf8_##S (a);               \
+}
+
+TEST_BOTH(bfloat16x4_t, bfloat16x8_t, bf16)
+TEST_BOTH(float16x4_t, float16x8_t, f16)
+TEST_BOTH(float32x2_t, float32x4_t, f32)
+TEST_BOTH(float64x1_t, float64x2_t, f64)
+TEST_BOTH(poly8x8_t, poly8x16_t, p8)
+TEST_BOTH(poly16x4_t, poly16x8_t, p16)
+TEST_BOTH(poly64x1_t, poly64x2_t, p64)
+TEST_128(poly128_t, p128)
+TEST_BOTH(int8x8_t, int8x16_t, s8)
+TEST_BOTH(int16x4_t, int16x8_t, s16)
+TEST_BOTH(int32x2_t, int32x4_t, s32)
+TEST_BOTH(int64x1_t, int64x2_t, s64)
+TEST_BOTH(uint8x8_t, uint8x16_t, u8)
+TEST_BOTH(uint16x4_t, uint16x8_t, u16)
+TEST_BOTH(uint32x2_t, uint32x4_t, u32)
+TEST_BOTH(uint64x1_t, uint64x2_t, u64)
+
+
