Hello, This patch supports the following MVE ACLE "add with carry across beats" intrinsics and "beat-wise subtract" intrinsics.
vadciq_s32, vadciq_u32, vadciq_m_s32, vadciq_m_u32, vadcq_s32, vadcq_u32, vadcq_m_s32, vadcq_m_u32, vsbciq_s32, vsbciq_u32, vsbciq_m_s32, vsbciq_m_u32, vsbcq_s32, vsbcq_u32, vsbcq_m_s32, vsbcq_m_u32. Please refer to M-profile Vector Extension (MVE) intrinsics [1] for more details. [1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics Regression tested on arm-none-eabi and found no regressions. Ok for trunk? Thanks, Srinath. gcc/ChangeLog: 2019-11-08 Andre Vieira <andre.simoesdiasvie...@arm.com> Mihail Ionescu <mihail.ione...@arm.com> Srinath Parvathaneni <srinath.parvathan...@arm.com> * config/arm/arm_mve.h (vadciq_s32): Define macro. (vadciq_u32): Likewise. (vadciq_m_s32): Likewise. (vadciq_m_u32): Likewise. (vadcq_s32): Likewise. (vadcq_u32): Likewise. (vadcq_m_s32): Likewise. (vadcq_m_u32): Likewise. (vsbciq_s32): Likewise. (vsbciq_u32): Likewise. (vsbciq_m_s32): Likewise. (vsbciq_m_u32): Likewise. (vsbcq_s32): Likewise. (vsbcq_u32): Likewise. (vsbcq_m_s32): Likewise. (vsbcq_m_u32): Likewise. (__arm_vadciq_s32): Define intrinsic. (__arm_vadciq_u32): Likewise. (__arm_vadciq_m_s32): Likewise. (__arm_vadciq_m_u32): Likewise. (__arm_vadcq_s32): Likewise. (__arm_vadcq_u32): Likewise. (__arm_vadcq_m_s32): Likewise. (__arm_vadcq_m_u32): Likewise. (__arm_vsbciq_s32): Likewise. (__arm_vsbciq_u32): Likewise. (__arm_vsbciq_m_s32): Likewise. (__arm_vsbciq_m_u32): Likewise. (__arm_vsbcq_s32): Likewise. (__arm_vsbcq_u32): Likewise. (__arm_vsbcq_m_s32): Likewise. (__arm_vsbcq_m_u32): Likewise. (vadciq_m): Define polymorphic variant. (vadciq): Likewise. (vadcq_m): Likewise. (vadcq): Likewise. (vsbciq_m): Likewise. (vsbciq): Likewise. (vsbcq_m): Likewise. (vsbcq): Likewise. * config/arm/arm_mve_builtins.def (BINOP_NONE_NONE_NONE): Use builtin qualifier. (BINOP_UNONE_UNONE_UNONE): Likewise. (QUADOP_NONE_NONE_NONE_NONE_UNONE): Likewise. (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE): Likewise. * config/arm/mve.md (VADCIQ): Define iterator. 
(VADCIQ_M): Likewise. (VSBCQ): Likewise. (VSBCQ_M): Likewise. (VSBCIQ): Likewise. (VSBCIQ_M): Likewise. (VADCQ): Likewise. (VADCQ_M): Likewise. (mve_vadciq_m_<supf>v4si): Define RTL pattern. (mve_vadciq_<supf>v4si): Likewise. (mve_vadcq_m_<supf>v4si): Likewise. (mve_vadcq_<supf>v4si): Likewise. (mve_vsbciq_m_<supf>v4si): Likewise. (mve_vsbciq_<supf>v4si): Likewise. (mve_vsbcq_m_<supf>v4si): Likewise. (mve_vsbcq_<supf>v4si): Likewise. gcc/testsuite/ChangeLog: 2019-11-08 Andre Vieira <andre.simoesdiasvie...@arm.com> Mihail Ionescu <mihail.ione...@arm.com> Srinath Parvathaneni <srinath.parvathan...@arm.com> * gcc.target/arm/mve/intrinsics/vadciq_m_s32.c: New test. * gcc.target/arm/mve/intrinsics/vadciq_m_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vadciq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vadciq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vadcq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vadcq_m_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vadcq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vadcq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsbciq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsbciq_m_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsbciq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsbciq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsbcq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsbcq_m_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsbcq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsbcq_u32.c: Likewise. 
############### Attachment also inlined for ease of reply ############### diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h index 31ad3fc5cddfedede02b10e194a426a98bd13024..1704b622c5d6e0abcf814ae1d439bb732f0bd76e 100644 --- a/gcc/config/arm/arm_mve.h +++ b/gcc/config/arm/arm_mve.h @@ -2450,6 +2450,22 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t; #define vrev32q_x_f16(__a, __p) __arm_vrev32q_x_f16(__a, __p) #define vrev64q_x_f16(__a, __p) __arm_vrev64q_x_f16(__a, __p) #define vrev64q_x_f32(__a, __p) __arm_vrev64q_x_f32(__a, __p) +#define vadciq_s32(__a, __b, __carry_out) __arm_vadciq_s32(__a, __b, __carry_out) +#define vadciq_u32(__a, __b, __carry_out) __arm_vadciq_u32(__a, __b, __carry_out) +#define vadciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_s32(__inactive, __a, __b, __carry_out, __p) +#define vadciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_u32(__inactive, __a, __b, __carry_out, __p) +#define vadcq_s32(__a, __b, __carry) __arm_vadcq_s32(__a, __b, __carry) +#define vadcq_u32(__a, __b, __carry) __arm_vadcq_u32(__a, __b, __carry) +#define vadcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_s32(__inactive, __a, __b, __carry, __p) +#define vadcq_m_u32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_u32(__inactive, __a, __b, __carry, __p) +#define vsbciq_s32(__a, __b, __carry_out) __arm_vsbciq_s32(__a, __b, __carry_out) +#define vsbciq_u32(__a, __b, __carry_out) __arm_vsbciq_u32(__a, __b, __carry_out) +#define vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p) +#define vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p) +#define vsbcq_s32(__a, __b, __carry) __arm_vsbcq_s32(__a, __b, __carry) +#define vsbcq_u32(__a, __b, __carry) __arm_vsbcq_u32(__a, __b, __carry) +#define vsbcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m_s32(__inactive, __a, __b, __carry, __p) 
+#define vsbcq_m_u32(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m_u32(__inactive, __a, __b, __carry, __p) #endif __extension__ extern __inline void @@ -15917,6 +15933,158 @@ __arm_vshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) return __builtin_mve_vshrq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p); } +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out) +{ + int32x4_t __res = __builtin_mve_vadciq_sv4si (__a, __b); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out) +{ + uint32x4_t __res = __builtin_mve_vadciq_uv4si (__a, __b); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) +{ + int32x4_t __res = __builtin_mve_vadciq_m_sv4si (__inactive, __a, __b, __p); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) +{ + uint32x4_t __res = __builtin_mve_vadciq_m_uv4si (__inactive, __a, __b, __p); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr 
() & ~0x20000000u) | (*__carry << 29)); + int32x4_t __res = __builtin_mve_vadcq_sv4si (__a, __b); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b, __p); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + uint32x4_t __res = __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b, __p); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out) +{ + int32x4_t __res = __builtin_mve_vsbciq_sv4si (__a, __b); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbciq_u32 (uint32x4_t __a, 
uint32x4_t __b, unsigned * __carry_out) +{ + uint32x4_t __res = __builtin_mve_vsbciq_uv4si (__a, __b); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) +{ + int32x4_t __res = __builtin_mve_vsbciq_m_sv4si (__inactive, __a, __b, __p); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) +{ + uint32x4_t __res = __builtin_mve_vsbciq_m_uv4si (__inactive, __a, __b, __p); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + uint32x4_t __res = __builtin_mve_vsbcq_uv4si (__a, __b); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t 
__p) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b, __p); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b, __p); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ __extension__ extern __inline void @@ -25552,6 +25720,65 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) +#define vadciq_m(p0,p1,p2,p3,p4) __arm_vadciq_m(p0,p1,p2,p3,p4) +#define __arm_vadciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) + +#define vadciq(p0,p1,p2) __arm_vadciq(p0,p1,p2) +#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int 
(*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vadcq_m(p0,p1,p2,p3,p4) __arm_vadcq_m(p0,p1,p2,p3,p4) +#define __arm_vadcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) + +#define vadcq(p0,p1,p2) __arm_vadcq(p0,p1,p2) +#define __arm_vadcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vsbciq_m(p0,p1,p2,p3,p4) __arm_vsbciq_m(p0,p1,p2,p3,p4) +#define __arm_vsbciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int 
(*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) + +#define vsbciq(p0,p1,p2) __arm_vsbciq(p0,p1,p2) +#define __arm_vsbciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vsbcq_m(p0,p1,p2,p3,p4) __arm_vsbcq_m(p0,p1,p2,p3,p4) +#define __arm_vsbcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) + +#define vsbcq(p0,p1,p2) __arm_vsbcq(p0,p1,p2) +#define __arm_vsbcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int 
(*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) #endif /* MVE Floating point. */ diff --git a/gcc/config/arm/arm_mve_builtins.def b/gcc/config/arm/arm_mve_builtins.def index b77335cff133872558b48b5574dccc0f17df9ed1..a413b38676f2f102c16fdf2147f3b8a4d8ec47b4 100644 --- a/gcc/config/arm/arm_mve_builtins.def +++ b/gcc/config/arm/arm_mve_builtins.def @@ -857,3 +857,19 @@ VAR1 (LDRGBWBS_Z, vldrdq_gather_base_wb_z_s, v2di) VAR1 (LDRGBWBS, vldrwq_gather_base_wb_s, v4si) VAR1 (LDRGBWBS, vldrwq_gather_base_wb_f, v4sf) VAR1 (LDRGBWBS, vldrdq_gather_base_wb_s, v2di) +VAR1 (BINOP_NONE_NONE_NONE, vadciq_s, v4si) +VAR1 (BINOP_UNONE_UNONE_UNONE, vadciq_u, v4si) +VAR1 (BINOP_NONE_NONE_NONE, vadcq_s, v4si) +VAR1 (BINOP_UNONE_UNONE_UNONE, vadcq_u, v4si) +VAR1 (BINOP_NONE_NONE_NONE, vsbciq_s, v4si) +VAR1 (BINOP_UNONE_UNONE_UNONE, vsbciq_u, v4si) +VAR1 (BINOP_NONE_NONE_NONE, vsbcq_s, v4si) +VAR1 (BINOP_UNONE_UNONE_UNONE, vsbcq_u, v4si) +VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vadciq_m_s, v4si) +VAR1 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vadciq_m_u, v4si) +VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vadcq_m_s, v4si) +VAR1 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vadcq_m_u, v4si) +VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vsbciq_m_s, v4si) +VAR1 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vsbciq_m_u, v4si) +VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vsbcq_m_s, v4si) +VAR1 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vsbcq_m_u, v4si) diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md index a938e0922f8dc6749dc7192961ae2091d666c6e7..8ff69094378396830ef31d9e2ca9db71c58aefab 100644 --- a/gcc/config/arm/mve.md +++ b/gcc/config/arm/mve.md @@ -211,7 +211,10 @@ VDWDUPQ_M VIDUPQ VIDUPQ_M VIWDUPQ VIWDUPQ_M VSTRWQSBWB_S VSTRWQSBWB_U VLDRWQGBWB_S 
VLDRWQGBWB_U VSTRWQSBWB_F VLDRWQGBWB_F VSTRDQSBWB_S VSTRDQSBWB_U - VLDRDQGBWB_S VLDRDQGBWB_U]) + VLDRDQGBWB_S VLDRDQGBWB_U VADCQ_U VADCQ_M_U VADCQ_S + VADCQ_M_S VSBCIQ_U VSBCIQ_S VSBCIQ_M_U VSBCIQ_M_S + VSBCQ_U VSBCQ_S VSBCQ_M_U VSBCQ_M_S VADCIQ_U VADCIQ_M_U + VADCIQ_S VADCIQ_M_S]) (define_mode_attr MVE_CNVT [(V8HI "V8HF") (V4SI "V4SF") (V8HF "V8HI") (V4SF "V4SI")]) @@ -382,8 +385,13 @@ (VSTRWQSO_U "u") (VSTRWQSO_S "s") (VSTRWQSSO_U "u") (VSTRWQSSO_S "s") (VSTRWQSBWB_S "s") (VSTRWQSBWB_U "u") (VLDRWQGBWB_S "s") (VLDRWQGBWB_U "u") (VLDRDQGBWB_S "s") - (VLDRDQGBWB_U "u") (VSTRDQSBWB_S "s") - (VSTRDQSBWB_U "u")]) + (VLDRDQGBWB_U "u") (VSTRDQSBWB_S "s") (VADCQ_M_S "s") + (VSTRDQSBWB_U "u") (VSBCQ_U "u") (VSBCQ_M_U "u") + (VSBCQ_S "s") (VSBCQ_M_S "s") (VSBCIQ_U "u") + (VSBCIQ_M_U "u") (VSBCIQ_S "s") (VSBCIQ_M_S "s") + (VADCQ_U "u") (VADCQ_M_U "u") (VADCQ_S "s") + (VADCIQ_U "u") (VADCIQ_M_U "u") (VADCIQ_S "s") + (VADCIQ_M_S "s")]) (define_int_attr mode1 [(VCTP8Q "8") (VCTP16Q "16") (VCTP32Q "32") (VCTP64Q "64") (VCTP8Q_M "8") (VCTP16Q_M "16") @@ -636,6 +644,15 @@ (define_int_iterator VLDRWGBWBQ [VLDRWQGBWB_S VLDRWQGBWB_U]) (define_int_iterator VSTRDSBWBQ [VSTRDQSBWB_S VSTRDQSBWB_U]) (define_int_iterator VLDRDGBWBQ [VLDRDQGBWB_S VLDRDQGBWB_U]) +(define_int_iterator VADCIQ [VADCIQ_U VADCIQ_S]) +(define_int_iterator VADCIQ_M [VADCIQ_M_U VADCIQ_M_S]) +(define_int_iterator VSBCQ [VSBCQ_U VSBCQ_S]) +(define_int_iterator VSBCQ_M [VSBCQ_M_U VSBCQ_M_S]) +(define_int_iterator VSBCIQ [VSBCIQ_U VSBCIQ_S]) +(define_int_iterator VSBCIQ_M [VSBCIQ_M_U VSBCIQ_M_S]) +(define_int_iterator VADCQ [VADCQ_U VADCQ_S]) +(define_int_iterator VADCQ_M [VADCQ_M_U VADCQ_M_S]) + (define_insn "*mve_mov<mode>" [(set (match_operand:MVE_types 0 "s_register_operand" "=w,w,r,w,w,r,w") @@ -10614,3 +10631,147 @@ return ""; } [(set_attr "length" "8")]) +;; +;; [vadciq_m_s, vadciq_m_u]) +;; +(define_insn "mve_vadciq_m_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI 
[(match_operand:V4SI 1 "s_register_operand" "0") + (match_operand:V4SI 2 "s_register_operand" "w") + (match_operand:V4SI 3 "s_register_operand" "w") + (match_operand:HI 4 "vpr_register_operand" "Up")] + VADCIQ_M)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(const_int 0)] + VADCIQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vadcit.i32\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") + (set_attr "length" "8")]) + +;; +;; [vadciq_u, vadciq_s]) +;; +(define_insn "mve_vadciq_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w")] + VADCIQ)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(const_int 0)] + VADCIQ)) + ] + "TARGET_HAVE_MVE" + "vadci.i32\t%q0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length" "4")]) + +;; +;; [vadcq_m_s, vadcq_m_u]) +;; +(define_insn "mve_vadcq_m_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "0") + (match_operand:V4SI 2 "s_register_operand" "w") + (match_operand:V4SI 3 "s_register_operand" "w") + (match_operand:HI 4 "vpr_register_operand" "Up")] + VADCQ_M)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(reg:SI VFPCC_REGNUM)] + VADCQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vadct.i32\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") + (set_attr "length" "8")]) + +;; +;; [vadcq_u, vadcq_s]) +;; +(define_insn "mve_vadcq_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w")] + VADCQ)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(reg:SI VFPCC_REGNUM)] + VADCQ)) + ] + "TARGET_HAVE_MVE" + "vadc.i32\t%q0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length" "4") + (set_attr "conds" "set")]) + +;; +;; [vsbciq_m_u, vsbciq_m_s]) +;; +(define_insn "mve_vsbciq_m_<supf>v4si" + [(set (match_operand:V4SI 0 
"s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w") + (match_operand:V4SI 3 "s_register_operand" "w") + (match_operand:HI 4 "vpr_register_operand" "Up")] + VSBCIQ_M)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(const_int 0)] + VSBCIQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vsbcit.i32\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") + (set_attr "length" "8")]) + +;; +;; [vsbciq_s, vsbciq_u]) +;; +(define_insn "mve_vsbciq_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w")] + VSBCIQ)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(const_int 0)] + VSBCIQ)) + ] + "TARGET_HAVE_MVE" + "vsbci.i32\t%q0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length" "4")]) + +;; +;; [vsbcq_m_u, vsbcq_m_s]) +;; +(define_insn "mve_vsbcq_m_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w") + (match_operand:V4SI 3 "s_register_operand" "w") + (match_operand:HI 4 "vpr_register_operand" "Up")] + VSBCQ_M)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(reg:SI VFPCC_REGNUM)] + VSBCQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vsbct.i32\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") + (set_attr "length" "8")]) + +;; +;; [vsbcq_s, vsbcq_u]) +;; +(define_insn "mve_vsbcq_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w")] + VSBCQ)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(reg:SI VFPCC_REGNUM)] + VSBCQ)) + ] + "TARGET_HAVE_MVE" + "vsbc.i32\t%q0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length" "4")]) diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_s32.c 
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..51cedf3c5421241b0a2a1d8473e07172669a2043 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_s32.c @@ -0,0 +1,26 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vadciq_m_s32 (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vadciq_m (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..b46f0a7d44d8bf77105ffcfc59cbb24aa4eaa658 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_u32.c @@ -0,0 +1,26 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vadciq_m_u32 (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } 
*/ + +uint32x4_t +foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vadciq_m (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..124bff6dbb225edd156390411db2b4e18643f256 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, unsigned * carry_out) +{ + return vadciq_s32 (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vadci.i32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, unsigned * carry_out) +{ + return vadciq (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vadci.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..0718570130caa4cb88085f8a11ab92a72522cec7 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, unsigned * carry_out) +{ + return vadciq_u32 (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vadci.i32" } } */ + 
+uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry_out) +{ + return vadciq (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vadci.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..1c2a928d52ff04c7341f043c15123386453c66d5 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_s32.c @@ -0,0 +1,28 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vadcq_m_s32 (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vadcq_m (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 4 } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..af38e01b7c8fd180311afbb6ee05c758ec2e636e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_u32.c @@ -0,0 +1,29 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} 
{"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vadcq_m_u32 (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vpst" } } */ + +uint32x4_t +foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vadcq_m (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 4 } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..35be2d6aa2e31628a162bd9456be0844c861bb6a --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, unsigned * carry) +{ + return vadcq_s32 (a, b, carry); +} + +/* { dg-final { scan-assembler "vadc.i32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, unsigned * carry) +{ + return vadcq (a, b, carry); +} + +/* { dg-final { scan-assembler "vadc.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 4 } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_u32.c new file mode 100644 index 
0000000000000000000000000000000000000000..9a8246318b6fad157cbb9b74379e2f760fe71ff6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, unsigned * carry) +{ + return vadcq_u32 (a, b, carry); +} + +/* { dg-final { scan-assembler "vadc.i32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry) +{ + return vadcq (a, b, carry); +} + +/* { dg-final { scan-assembler "vadc.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 4 } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..c353a51080d9a15f033562636ffde3cb81e39b36 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_s32.c @@ -0,0 +1,26 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vsbciq_m_s32 (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vsbciq_m (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { 
scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..e2bddb737c866c86a0ba0ac6fb5de1eeb7206c69 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_u32.c @@ -0,0 +1,27 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vsbciq_m_u32 (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ + +uint32x4_t +foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vsbciq_m (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ + diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..db32b9cef100c1ec8b8fa75d946e9423e7dc6669 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, unsigned * carry_out) +{ + return vsbciq_s32 (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vsbci.i32" } } */ + +int32x4_t +foo1 (int32x4_t a, 
int32x4_t b, unsigned * carry_out) +{ + return vsbciq (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vsbci.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..60f213d9a631d7d12f8ab7dfe58e3c251b9e8854 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, unsigned * carry_out) +{ + return vsbciq_u32 (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vsbci.i32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry_out) +{ + return vsbciq (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vsbci.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..4ab7e2f1f69ade2189c44bc9bcd6243dc3cd9d68 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_s32.c @@ -0,0 +1,19 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vsbcq_m_s32 (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times
"mrc" 2 } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mcr" 1 } } */ + diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..da2edac3d95d5405226bd0b85a9dce42ad62fd9b --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_u32.c @@ -0,0 +1,19 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vsbcq_m_u32 (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mcr" 1 } } */ + diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..9c3e6e99f5938d523d5dee127300031ddc09ee33 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_s32.c @@ -0,0 +1,24 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, unsigned * carry) +{ + return vsbcq_s32 (a, b, carry); +} + +/* { dg-final { scan-assembler "vsbc.i32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, unsigned * carry) +{ + return vsbcq (a, b, carry); +} + +/* { dg-final { scan-assembler "vsbc.i32" } } */ +/* { dg-final { scan-assembler-times 
"mrc" 4 } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */ + diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..122b20c41b34c906c00b9914fe152195a3739118 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, unsigned * carry) +{ + return vsbcq_u32 (a, b, carry); +} + +/* { dg-final { scan-assembler "vsbc.i32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry) +{ + return vsbcq (a, b, carry); +} + +/* { dg-final { scan-assembler "vsbc.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 4 } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h index 31ad3fc5cddfedede02b10e194a426a98bd13024..1704b622c5d6e0abcf814ae1d439bb732f0bd76e 100644 --- a/gcc/config/arm/arm_mve.h +++ b/gcc/config/arm/arm_mve.h @@ -2450,6 +2450,22 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t; #define vrev32q_x_f16(__a, __p) __arm_vrev32q_x_f16(__a, __p) #define vrev64q_x_f16(__a, __p) __arm_vrev64q_x_f16(__a, __p) #define vrev64q_x_f32(__a, __p) __arm_vrev64q_x_f32(__a, __p) +#define vadciq_s32(__a, __b, __carry_out) __arm_vadciq_s32(__a, __b, __carry_out) +#define vadciq_u32(__a, __b, __carry_out) __arm_vadciq_u32(__a, __b, __carry_out) +#define vadciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_s32(__inactive, __a, __b, __carry_out, __p) +#define vadciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_u32(__inactive, __a, __b, __carry_out, __p) +#define vadcq_s32(__a, __b, __carry) __arm_vadcq_s32(__a, __b, __carry) +#define vadcq_u32(__a, __b, __carry) __arm_vadcq_u32(__a, __b, __carry) +#define vadcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_s32(__inactive, __a, __b, __carry, __p) +#define vadcq_m_u32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_u32(__inactive, __a, __b, __carry, __p) +#define vsbciq_s32(__a, __b, __carry_out) __arm_vsbciq_s32(__a, __b, __carry_out) +#define vsbciq_u32(__a, __b, __carry_out) __arm_vsbciq_u32(__a, __b, __carry_out) +#define vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p) +#define vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p) +#define vsbcq_s32(__a, __b, __carry) __arm_vsbcq_s32(__a, __b, __carry) +#define vsbcq_u32(__a, __b, __carry) __arm_vsbcq_u32(__a, __b, __carry) +#define vsbcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m_s32(__inactive, __a, __b, __carry, __p) +#define vsbcq_m_u32(__inactive, __a, __b, __carry, __p) 
__arm_vsbcq_m_u32(__inactive, __a, __b, __carry, __p) #endif __extension__ extern __inline void @@ -15917,6 +15933,158 @@ __arm_vshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p) return __builtin_mve_vshrq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p); } +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out) +{ + int32x4_t __res = __builtin_mve_vadciq_sv4si (__a, __b); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out) +{ + uint32x4_t __res = __builtin_mve_vadciq_uv4si (__a, __b); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) +{ + int32x4_t __res = __builtin_mve_vadciq_m_sv4si (__inactive, __a, __b, __p); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) +{ + uint32x4_t __res = __builtin_mve_vadciq_m_uv4si (__inactive, __a, __b, __p); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + int32x4_t 
__res = __builtin_mve_vadcq_sv4si (__a, __b); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b, __p); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + uint32x4_t __res = __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b, __p); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out) +{ + int32x4_t __res = __builtin_mve_vsbciq_sv4si (__a, __b); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out) +{ + 
uint32x4_t __res = __builtin_mve_vsbciq_uv4si (__a, __b); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) +{ + int32x4_t __res = __builtin_mve_vsbciq_m_sv4si (__inactive, __a, __b, __p); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) +{ + uint32x4_t __res = __builtin_mve_vsbciq_m_uv4si (__inactive, __a, __b, __p); + *__carry_out = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + uint32x4_t __res = __builtin_mve_vsbcq_uv4si (__a, __b); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p) +{ + 
__builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b, __p); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p) +{ + __builtin_arm_set_fpscr((__builtin_arm_get_fpscr () & ~0x20000000u) | (*__carry << 29)); + uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b, __p); + *__carry = (__builtin_arm_get_fpscr () >> 29) & 0x1u; + return __res; +} + #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ __extension__ extern __inline void @@ -25552,6 +25720,65 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) +#define vadciq_m(p0,p1,p2,p3,p4) __arm_vadciq_m(p0,p1,p2,p3,p4) +#define __arm_vadciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) + +#define vadciq(p0,p1,p2) __arm_vadciq(p0,p1,p2) +#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int 
(*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vadcq_m(p0,p1,p2,p3,p4) __arm_vadcq_m(p0,p1,p2,p3,p4) +#define __arm_vadcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) + +#define vadcq(p0,p1,p2) __arm_vadcq(p0,p1,p2) +#define __arm_vadcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vsbciq_m(p0,p1,p2,p3,p4) __arm_vsbciq_m(p0,p1,p2,p3,p4) +#define __arm_vsbciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int 
(*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) + +#define vsbciq(p0,p1,p2) __arm_vsbciq(p0,p1,p2) +#define __arm_vsbciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vsbcq_m(p0,p1,p2,p3,p4) __arm_vsbcq_m(p0,p1,p2,p3,p4) +#define __arm_vsbcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) + +#define vsbcq(p0,p1,p2) __arm_vsbcq(p0,p1,p2) +#define __arm_vsbcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int 
(*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) #endif /* MVE Floating point. */ diff --git a/gcc/config/arm/arm_mve_builtins.def b/gcc/config/arm/arm_mve_builtins.def index b77335cff133872558b48b5574dccc0f17df9ed1..a413b38676f2f102c16fdf2147f3b8a4d8ec47b4 100644 --- a/gcc/config/arm/arm_mve_builtins.def +++ b/gcc/config/arm/arm_mve_builtins.def @@ -857,3 +857,19 @@ VAR1 (LDRGBWBS_Z, vldrdq_gather_base_wb_z_s, v2di) VAR1 (LDRGBWBS, vldrwq_gather_base_wb_s, v4si) VAR1 (LDRGBWBS, vldrwq_gather_base_wb_f, v4sf) VAR1 (LDRGBWBS, vldrdq_gather_base_wb_s, v2di) +VAR1 (BINOP_NONE_NONE_NONE, vadciq_s, v4si) +VAR1 (BINOP_UNONE_UNONE_UNONE, vadciq_u, v4si) +VAR1 (BINOP_NONE_NONE_NONE, vadcq_s, v4si) +VAR1 (BINOP_UNONE_UNONE_UNONE, vadcq_u, v4si) +VAR1 (BINOP_NONE_NONE_NONE, vsbciq_s, v4si) +VAR1 (BINOP_UNONE_UNONE_UNONE, vsbciq_u, v4si) +VAR1 (BINOP_NONE_NONE_NONE, vsbcq_s, v4si) +VAR1 (BINOP_UNONE_UNONE_UNONE, vsbcq_u, v4si) +VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vadciq_m_s, v4si) +VAR1 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vadciq_m_u, v4si) +VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vadcq_m_s, v4si) +VAR1 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vadcq_m_u, v4si) +VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vsbciq_m_s, v4si) +VAR1 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vsbciq_m_u, v4si) +VAR1 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vsbcq_m_s, v4si) +VAR1 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vsbcq_m_u, v4si) diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md index a938e0922f8dc6749dc7192961ae2091d666c6e7..8ff69094378396830ef31d9e2ca9db71c58aefab 100644 --- a/gcc/config/arm/mve.md +++ b/gcc/config/arm/mve.md @@ -211,7 +211,10 @@ VDWDUPQ_M VIDUPQ VIDUPQ_M VIWDUPQ VIWDUPQ_M VSTRWQSBWB_S VSTRWQSBWB_U VLDRWQGBWB_S 
VLDRWQGBWB_U VSTRWQSBWB_F VLDRWQGBWB_F VSTRDQSBWB_S VSTRDQSBWB_U - VLDRDQGBWB_S VLDRDQGBWB_U]) + VLDRDQGBWB_S VLDRDQGBWB_U VADCQ_U VADCQ_M_U VADCQ_S + VADCQ_M_S VSBCIQ_U VSBCIQ_S VSBCIQ_M_U VSBCIQ_M_S + VSBCQ_U VSBCQ_S VSBCQ_M_U VSBCQ_M_S VADCIQ_U VADCIQ_M_U + VADCIQ_S VADCIQ_M_S]) (define_mode_attr MVE_CNVT [(V8HI "V8HF") (V4SI "V4SF") (V8HF "V8HI") (V4SF "V4SI")]) @@ -382,8 +385,13 @@ (VSTRWQSO_U "u") (VSTRWQSO_S "s") (VSTRWQSSO_U "u") (VSTRWQSSO_S "s") (VSTRWQSBWB_S "s") (VSTRWQSBWB_U "u") (VLDRWQGBWB_S "s") (VLDRWQGBWB_U "u") (VLDRDQGBWB_S "s") - (VLDRDQGBWB_U "u") (VSTRDQSBWB_S "s") - (VSTRDQSBWB_U "u")]) + (VLDRDQGBWB_U "u") (VSTRDQSBWB_S "s") (VADCQ_M_S "s") + (VSTRDQSBWB_U "u") (VSBCQ_U "u") (VSBCQ_M_U "u") + (VSBCQ_S "s") (VSBCQ_M_S "s") (VSBCIQ_U "u") + (VSBCIQ_M_U "u") (VSBCIQ_S "s") (VSBCIQ_M_S "s") + (VADCQ_U "u") (VADCQ_M_U "u") (VADCQ_S "s") + (VADCIQ_U "u") (VADCIQ_M_U "u") (VADCIQ_S "s") + (VADCIQ_M_S "s")]) (define_int_attr mode1 [(VCTP8Q "8") (VCTP16Q "16") (VCTP32Q "32") (VCTP64Q "64") (VCTP8Q_M "8") (VCTP16Q_M "16") @@ -636,6 +644,15 @@ (define_int_iterator VLDRWGBWBQ [VLDRWQGBWB_S VLDRWQGBWB_U]) (define_int_iterator VSTRDSBWBQ [VSTRDQSBWB_S VSTRDQSBWB_U]) (define_int_iterator VLDRDGBWBQ [VLDRDQGBWB_S VLDRDQGBWB_U]) +(define_int_iterator VADCIQ [VADCIQ_U VADCIQ_S]) +(define_int_iterator VADCIQ_M [VADCIQ_M_U VADCIQ_M_S]) +(define_int_iterator VSBCQ [VSBCQ_U VSBCQ_S]) +(define_int_iterator VSBCQ_M [VSBCQ_M_U VSBCQ_M_S]) +(define_int_iterator VSBCIQ [VSBCIQ_U VSBCIQ_S]) +(define_int_iterator VSBCIQ_M [VSBCIQ_M_U VSBCIQ_M_S]) +(define_int_iterator VADCQ [VADCQ_U VADCQ_S]) +(define_int_iterator VADCQ_M [VADCQ_M_U VADCQ_M_S]) + (define_insn "*mve_mov<mode>" [(set (match_operand:MVE_types 0 "s_register_operand" "=w,w,r,w,w,r,w") @@ -10614,3 +10631,147 @@ return ""; } [(set_attr "length" "8")]) +;; +;; [vadciq_m_s, vadciq_m_u]) +;; +(define_insn "mve_vadciq_m_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI 
[(match_operand:V4SI 1 "s_register_operand" "0") + (match_operand:V4SI 2 "s_register_operand" "w") + (match_operand:V4SI 3 "s_register_operand" "w") + (match_operand:HI 4 "vpr_register_operand" "Up")] + VADCIQ_M)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(const_int 0)] + VADCIQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vadcit.i32\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") + (set_attr "length" "8")]) + +;; +;; [vadciq_u, vadciq_s]) +;; +(define_insn "mve_vadciq_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w")] + VADCIQ)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(const_int 0)] + VADCIQ)) + ] + "TARGET_HAVE_MVE" + "vadci.i32\t%q0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length" "4")]) + +;; +;; [vadcq_m_s, vadcq_m_u]) +;; +(define_insn "mve_vadcq_m_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "0") + (match_operand:V4SI 2 "s_register_operand" "w") + (match_operand:V4SI 3 "s_register_operand" "w") + (match_operand:HI 4 "vpr_register_operand" "Up")] + VADCQ_M)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(reg:SI VFPCC_REGNUM)] + VADCQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vadct.i32\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") + (set_attr "length" "8")]) + +;; +;; [vadcq_u, vadcq_s]) +;; +(define_insn "mve_vadcq_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w")] + VADCQ)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(reg:SI VFPCC_REGNUM)] + VADCQ)) + ] + "TARGET_HAVE_MVE" + "vadc.i32\t%q0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length" "4") + (set_attr "conds" "set")]) + +;; +;; [vsbciq_m_u, vsbciq_m_s]) +;; +(define_insn "mve_vsbciq_m_<supf>v4si" + [(set (match_operand:V4SI 0 
"s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "0") + (match_operand:V4SI 2 "s_register_operand" "w") + (match_operand:V4SI 3 "s_register_operand" "w") + (match_operand:HI 4 "vpr_register_operand" "Up")] + VSBCIQ_M)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(const_int 0)] + VSBCIQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vsbcit.i32\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") + (set_attr "length" "8")]) + +;; +;; [vsbciq_s, vsbciq_u]) +;; +(define_insn "mve_vsbciq_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w")] + VSBCIQ)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(const_int 0)] + VSBCIQ)) + ] + "TARGET_HAVE_MVE" + "vsbci.i32\t%q0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length" "4")]) + +;; +;; [vsbcq_m_u, vsbcq_m_s]) +;; +(define_insn "mve_vsbcq_m_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "0") + (match_operand:V4SI 2 "s_register_operand" "w") + (match_operand:V4SI 3 "s_register_operand" "w") + (match_operand:HI 4 "vpr_register_operand" "Up")] + VSBCQ_M)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(reg:SI VFPCC_REGNUM)] + VSBCQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vsbct.i32\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") + (set_attr "length" "8")]) + +;; +;; [vsbcq_s, vsbcq_u]) +;; +(define_insn "mve_vsbcq_<supf>v4si" + [(set (match_operand:V4SI 0 "s_register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w")] + VSBCQ)) + (set (reg:SI VFPCC_REGNUM) + (unspec:SI [(reg:SI VFPCC_REGNUM)] + VSBCQ)) + ] + "TARGET_HAVE_MVE" + "vsbc.i32\t%q0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length" "4")]) diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_s32.c
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..51cedf3c5421241b0a2a1d8473e07172669a2043 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_s32.c @@ -0,0 +1,26 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vadciq_m_s32 (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vadciq_m (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..b46f0a7d44d8bf77105ffcfc59cbb24aa4eaa658 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_m_u32.c @@ -0,0 +1,26 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vadciq_m_u32 (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } 
*/ + +uint32x4_t +foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vadciq_m (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..124bff6dbb225edd156390411db2b4e18643f256 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, unsigned * carry_out) +{ + return vadciq_s32 (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vadci.i32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, unsigned * carry_out) +{ + return vadciq (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vadci.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..0718570130caa4cb88085f8a11ab92a72522cec7 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadciq_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, unsigned * carry_out) +{ + return vadciq_u32 (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vadci.i32" } } */ + 
+uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry_out) +{ + return vadciq (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vadci.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..1c2a928d52ff04c7341f043c15123386453c66d5 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_s32.c @@ -0,0 +1,28 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vadcq_m_s32 (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vadcq_m (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 4 } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..af38e01b7c8fd180311afbb6ee05c758ec2e636e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_m_u32.c @@ -0,0 +1,29 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} 
{"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vadcq_m_u32 (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vpst" } } */ + +uint32x4_t +foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vadcq_m (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vadct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 4 } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..35be2d6aa2e31628a162bd9456be0844c861bb6a --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, unsigned * carry) +{ + return vadcq_s32 (a, b, carry); +} + +/* { dg-final { scan-assembler "vadc.i32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, unsigned * carry) +{ + return vadcq (a, b, carry); +} + +/* { dg-final { scan-assembler "vadc.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 4 } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_u32.c new file mode 100644 index 
0000000000000000000000000000000000000000..9a8246318b6fad157cbb9b74379e2f760fe71ff6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vadcq_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, unsigned * carry) +{ + return vadcq_u32 (a, b, carry); +} + +/* { dg-final { scan-assembler "vadc.i32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry) +{ + return vadcq (a, b, carry); +} + +/* { dg-final { scan-assembler "vadc.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 4 } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..c353a51080d9a15f033562636ffde3cb81e39b36 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_s32.c @@ -0,0 +1,26 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vsbciq_m_s32 (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vsbciq_m (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { 
scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..e2bddb737c866c86a0ba0ac6fb5de1eeb7206c69 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_m_u32.c @@ -0,0 +1,27 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vsbciq_m_u32 (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ + +uint32x4_t +foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry_out, mve_pred16_t p) +{ + return vsbciq_m (inactive, a, b, carry_out, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbcit.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ + diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..db32b9cef100c1ec8b8fa75d946e9423e7dc6669 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, unsigned * carry_out) +{ + return vsbciq_s32 (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vsbci.i32" } } */ + +int32x4_t +foo1 (int32x4_t a, 
int32x4_t b, unsigned * carry_out) +{ + return vsbciq (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vsbci.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..60f213d9a631d7d12f8ab7dfe58e3c251b9e8854 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbciq_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, unsigned * carry_out) +{ + return vsbciq_u32 (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vsbci.i32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry_out) +{ + return vsbciq (a, b, carry_out); +} + +/* { dg-final { scan-assembler "vsbci.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..4ab7e2f1f69ade2189c44bc9bcd6243dc3cd9d68 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_s32.c @@ -0,0 +1,19 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vsbcq_m_s32 (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times 
"mrc" 2 } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mcr" 1 } } */ + diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..da2edac3d95d5405226bd0b85a9dce42ad62fd9b --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_m_u32.c @@ -0,0 +1,19 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned * carry, mve_pred16_t p) +{ + return vsbcq_m_u32 (inactive, a, b, carry, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vsbct.i32" } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mrc" 2 } } */ +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler-times "mcr" 1 } } */ + diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_s32.c new file mode 100644 index 0000000000000000000000000000000000000000..9c3e6e99f5938d523d5dee127300031ddc09ee33 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_s32.c @@ -0,0 +1,24 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, unsigned * carry) +{ + return vsbcq_s32 (a, b, carry); +} + +/* { dg-final { scan-assembler "vsbc.i32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, unsigned * carry) +{ + return vsbcq (a, b, carry); +} + +/* { dg-final { scan-assembler "vsbc.i32" } } */ +/* { dg-final { scan-assembler-times 
"mrc" 4 } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */ + diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_u32.c new file mode 100644 index 0000000000000000000000000000000000000000..122b20c41b34c906c00b9914fe152195a3739118 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsbcq_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */ +/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, unsigned * carry) +{ + return vsbcq_u32 (a, b, carry); +} + +/* { dg-final { scan-assembler "vsbc.i32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, unsigned * carry) +{ + return vsbcq (a, b, carry); +} + +/* { dg-final { scan-assembler "vsbc.i32" } } */ +/* { dg-final { scan-assembler-times "mrc" 4 } } */ +/* { dg-final { scan-assembler-times "mcr" 2 } } */