Jonathan Wright <jonathan.wri...@arm.com> writes:
> Hi,
>
> This patch declares unsigned and polynomial type-qualified builtins for
> vget_low_*/vget_high_* Neon intrinsics.  Using these builtins removes
> the need for many casts in arm_neon.h.
>
> Bootstrapped and regression tested on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?
>
> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-11-10  Jonathan Wright  <jonathan.wri...@arm.com>
>
> 	* config/aarch64/aarch64-builtins.c (TYPES_UNOPP): Define.
> 	* config/aarch64/aarch64-simd-builtins.def: Declare type-
> 	qualified builtins for vget_low/high.
> 	* config/aarch64/arm_neon.h (vget_low_p8): Use type-qualified
> 	builtin and remove casts.
> 	(vget_low_p16): Likewise.
> 	(vget_low_p64): Likewise.
> 	(vget_low_u8): Likewise.
> 	(vget_low_u16): Likewise.
> 	(vget_low_u32): Likewise.
> 	(vget_low_u64): Likewise.
> 	(vget_high_p8): Likewise.
> 	(vget_high_p16): Likewise.
> 	(vget_high_p64): Likewise.
> 	(vget_high_u8): Likewise.
> 	(vget_high_u16): Likewise.
> 	(vget_high_u32): Likewise.
> 	(vget_high_u64): Likewise.
> 	* config/aarch64/iterators.md (VQ_P): New mode iterator.
>
> diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
> index 7abf8747b69591815068709af42598c47d73269e..3edc2f55e571c1a34a24add842c47b130d900cf6 100644
> --- a/gcc/config/aarch64/aarch64-builtins.c
> +++ b/gcc/config/aarch64/aarch64-builtins.c
> @@ -204,6 +204,10 @@ aarch64_types_unopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
>    = { qualifier_unsigned, qualifier_unsigned };
>  #define TYPES_UNOPU (aarch64_types_unopu_qualifiers)
>  static enum aarch64_type_qualifiers
> +aarch64_types_unopp_qualifiers[SIMD_MAX_BUILTIN_ARGS]
> +  = { qualifier_poly, qualifier_poly };
> +#define TYPES_UNOPP (aarch64_types_unopp_qualifiers)
> +static enum aarch64_type_qualifiers
>  aarch64_types_unopus_qualifiers[SIMD_MAX_BUILTIN_ARGS]
>    = { qualifier_unsigned, qualifier_none };
>  #define TYPES_UNOPUS (aarch64_types_unopus_qualifiers)
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index ab5f3a098f2047d0f1ba933f4418609678102c3d..08d6bbe635424217687a429709c696c3282feea0 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -62,8 +62,12 @@
>
>    /* Implemented by aarch64_get_low<mode>.  */
>    BUILTIN_VQMOV (UNOP, get_low, 0, AUTO_FP)
> +  BUILTIN_VQ_I (UNOPU, get_low, 0, NONE)
> +  BUILTIN_VQ_P (UNOPP, get_low, 0, NONE)
>    /* Implemented by aarch64_get_high<mode>.  */
>    BUILTIN_VQMOV (UNOP, get_high, 0, AUTO_FP)
> +  BUILTIN_VQ_I (UNOPU, get_high, 0, NONE)
> +  BUILTIN_VQ_P (UNOPP, get_high, 0, NONE)
>
>    /* Implemented by aarch64_<sur>q<r>shl<mode>.  */
>    BUILTIN_VSDQ_I (BINOP, sqshl, 0, NONE)
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index c374e90f31546886a519ba270113ccedd4ca7abf..6137d53297863aaad0cad31c7eb6eef24bc4316a 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -5799,21 +5799,21 @@ __extension__ extern __inline poly8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_low_p8 (poly8x16_t __a)
>  {
> -  return (poly8x8_t) __builtin_aarch64_get_lowv16qi ((int8x16_t) __a);
> +  return __builtin_aarch64_get_lowv16qi_pp (__a);
>  }
>
>  __extension__ extern __inline poly16x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_low_p16 (poly16x8_t __a)
>  {
> -  return (poly16x4_t) __builtin_aarch64_get_lowv8hi ((int16x8_t) __a);
> +  return __builtin_aarch64_get_lowv8hi_pp (__a);
>  }
>
>  __extension__ extern __inline poly64x1_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_low_p64 (poly64x2_t __a)
>  {
> -  return (poly64x1_t) __builtin_aarch64_get_lowv2di ((int64x2_t) __a);
> +  return (poly64x1_t) __builtin_aarch64_get_lowv2di_pp (__a);

I think we could define the intrinsics such that the return cast isn't
needed either.  poly64x1_t has the same mode (DI) as the scalar type,
so it should “just” be a case of using qualifiers to pick the x1 vector
type instead of the scalar type.

Thanks,
Richard

>  }
>
>  __extension__ extern __inline int8x8_t
> @@ -5848,28 +5848,28 @@ __extension__ extern __inline uint8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_low_u8 (uint8x16_t __a)
>  {
> -  return (uint8x8_t) __builtin_aarch64_get_lowv16qi ((int8x16_t) __a);
> +  return __builtin_aarch64_get_lowv16qi_uu (__a);
>  }
>
>  __extension__ extern __inline uint16x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_low_u16 (uint16x8_t __a)
>  {
> -  return (uint16x4_t) __builtin_aarch64_get_lowv8hi ((int16x8_t) __a);
> +  return __builtin_aarch64_get_lowv8hi_uu (__a);
>  }
>
>  __extension__ extern __inline uint32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_low_u32 (uint32x4_t __a)
>  {
> -  return (uint32x2_t) __builtin_aarch64_get_lowv4si ((int32x4_t) __a);
> +  return __builtin_aarch64_get_lowv4si_uu (__a);
>  }
>
>  __extension__ extern __inline uint64x1_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_low_u64 (uint64x2_t __a)
>  {
> -  return (uint64x1_t) {__builtin_aarch64_get_lowv2di ((int64x2_t) __a)};
> +  return (uint64x1_t) {__builtin_aarch64_get_lowv2di_uu (__a)};
>  }
>
>  __extension__ extern __inline float16x4_t
> @@ -5897,21 +5897,21 @@ __extension__ extern __inline poly8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_high_p8 (poly8x16_t __a)
>  {
> -  return (poly8x8_t) __builtin_aarch64_get_highv16qi ((int8x16_t) __a);
> +  return __builtin_aarch64_get_highv16qi_pp (__a);
>  }
>
>  __extension__ extern __inline poly16x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_high_p16 (poly16x8_t __a)
>  {
> -  return (poly16x4_t) __builtin_aarch64_get_highv8hi ((int16x8_t) __a);
> +  return __builtin_aarch64_get_highv8hi_pp (__a);
>  }
>
>  __extension__ extern __inline poly64x1_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_high_p64 (poly64x2_t __a)
>  {
> -  return (poly64x1_t) __builtin_aarch64_get_highv2di ((int64x2_t) __a);
> +  return (poly64x1_t) __builtin_aarch64_get_highv2di_pp (__a);
>  }
>
>  __extension__ extern __inline int8x8_t
> @@ -5946,28 +5946,28 @@ __extension__ extern __inline uint8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_high_u8 (uint8x16_t __a)
>  {
> -  return (uint8x8_t) __builtin_aarch64_get_highv16qi ((int8x16_t) __a);
> +  return __builtin_aarch64_get_highv16qi_uu (__a);
>  }
>
>  __extension__ extern __inline uint16x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_high_u16 (uint16x8_t __a)
>  {
> -  return (uint16x4_t) __builtin_aarch64_get_highv8hi ((int16x8_t) __a);
> +  return __builtin_aarch64_get_highv8hi_uu (__a);
>  }
>
>  __extension__ extern __inline uint32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_high_u32 (uint32x4_t __a)
>  {
> -  return (uint32x2_t) __builtin_aarch64_get_highv4si ((int32x4_t) __a);
> +  return __builtin_aarch64_get_highv4si_uu (__a);
>  }
>
>  __extension__ extern __inline uint64x1_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vget_high_u64 (uint64x2_t __a)
>  {
> -  return (uint64x1_t) {__builtin_aarch64_get_highv2di ((int64x2_t) __a)};
> +  return (uint64x1_t) {__builtin_aarch64_get_highv2di_uu (__a)};
>  }
>
>
> diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
> index 1598e19751ff5112a072118a629755272f48e83b..ae32d6b1756d1dd78c28844987c563424a726617 100644
> --- a/gcc/config/aarch64/iterators.md
> +++ b/gcc/config/aarch64/iterators.md
> @@ -122,6 +122,9 @@
>  ;; Quad integer vector modes.
>  (define_mode_iterator VQ_I [V16QI V8HI V4SI V2DI])
>
> +;; Quad vector polynomial modes.
> +(define_mode_iterator VQ_P [V16QI V8HI V2DI])
> +
>  ;; VQ without 2 element modes.
>  (define_mode_iterator VQ_NO2E [V16QI V8HI V4SI V8HF V4SF V8BF])
>
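As an illustration of the suggestion above about dropping the remaining
(poly64x1_t) return casts, here is a minimal sketch (not part of the
posted patch) of what vget_low_p64 could look like in arm_neon.h,
assuming the qualifiers were adjusted so that
__builtin_aarch64_get_lowv2di_pp is declared to return the poly64x1_t
vector type rather than the DImode scalar:

  __extension__ extern __inline poly64x1_t
  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
  vget_low_p64 (poly64x2_t __a)
  {
    /* Assumes the builtin itself returns poly64x1_t, so the
       (poly64x1_t) cast on the return value is no longer needed.  */
    return __builtin_aarch64_get_lowv2di_pp (__a);
  }

The same pattern would then apply to vget_high_p64 with
__builtin_aarch64_get_highv2di_pp.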