Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi,
>
> As subject, this patch rewrites the v[q]tbl Neon intrinsics to use RTL
> builtins rather than inline assembly code, allowing for better scheduling
> and optimization.
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?

OK, thanks.

Richard

> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-02-12  Jonathan Wright  <jonathan.wri...@arm.com>
>
>       * config/aarch64/aarch64-simd-builtins.def: Add tbl1 builtin
>       generator macros.
>       * config/aarch64/arm_neon.h (vqtbl1_p8): Use RTL builtin
>       instead of inline asm.
>       (vqtbl1_s8): Likewise.
>       (vqtbl1_u8): Likewise.
>       (vqtbl1q_p8): Likewise.
>       (vqtbl1q_s8): Likewise.
>       (vqtbl1q_u8): Likewise.
>       (vtbl1_s8): Likewise.
>       (vtbl1_u8): Likewise.
>       (vtbl1_p8): Likewise.
>       (vtbl2_s8): Likewise.
>       (vtbl2_u8): Likewise.
>       (vtbl2_p8): Likewise.
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index 4c55a424233d437184ceaf66b6983b79a907fce4..a5cfb6754456a1e8f0fca57c68b009a53e09789e 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -698,6 +698,10 @@
>    VAR1 (BINOP, tbl3, 0, NONE, v8qi)
>    VAR1 (BINOP, tbl3, 0, NONE, v16qi)
>  
> +  /* Implemented by aarch64_tbl1<mode>.  */
> +  VAR2 (BINOP, tbl1, 0, NONE, v8qi, v16qi)
> +  VAR2 (BINOPU, tbl1, 0, NONE, v8qi, v16qi)
> +
>    /* Implemented by aarch64_qtbl3<mode>.  */
>    VAR1 (BINOP, qtbl3, 0, NONE, v8qi)
>    VAR1 (BINOP, qtbl3, 0, NONE, v16qi)
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index 41cd6ccb354b0231409372c0f1b5e1b87e4a9169..46f919fb254b98f887db4748d3b410b7d18e8a4e 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -9571,74 +9571,46 @@ vqrdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
>  
>  __extension__ extern __inline poly8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -vqtbl1_p8 (poly8x16_t __a, uint8x8_t __b)
> +vqtbl1_p8 (poly8x16_t __tab, uint8x8_t __idx)
>  {
> -  poly8x8_t __result;
> -  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
> -           : "=w"(__result)
> -           : "w"(__a), "w"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return (poly8x8_t) __builtin_aarch64_tbl1v8qi ((int8x16_t) __tab,
> +                                              (int8x8_t) __idx);
>  }
>  
>  __extension__ extern __inline int8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -vqtbl1_s8 (int8x16_t __a, uint8x8_t __b)
> +vqtbl1_s8 (int8x16_t __tab, uint8x8_t __idx)
>  {
> -  int8x8_t __result;
> -  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
> -           : "=w"(__result)
> -           : "w"(__a), "w"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbl1v8qi (__tab, (int8x8_t) __idx);
>  }
>  
>  __extension__ extern __inline uint8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -vqtbl1_u8 (uint8x16_t __a, uint8x8_t __b)
> +vqtbl1_u8 (uint8x16_t __tab, uint8x8_t __idx)
>  {
> -  uint8x8_t __result;
> -  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
> -           : "=w"(__result)
> -           : "w"(__a), "w"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbl1v8qi_uuu (__tab, __idx);
>  }
>  
>  __extension__ extern __inline poly8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -vqtbl1q_p8 (poly8x16_t __a, uint8x16_t __b)
> +vqtbl1q_p8 (poly8x16_t __tab, uint8x16_t __idx)
>  {
> -  poly8x16_t __result;
> -  __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
> -           : "=w"(__result)
> -           : "w"(__a), "w"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return (poly8x16_t) __builtin_aarch64_tbl1v16qi ((int8x16_t) __tab,
> +                                                (int8x16_t) __idx);
>  }
>  
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -vqtbl1q_s8 (int8x16_t __a, uint8x16_t __b)
> +vqtbl1q_s8 (int8x16_t __tab, uint8x16_t __idx)
>  {
> -  int8x16_t __result;
> -  __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
> -           : "=w"(__result)
> -           : "w"(__a), "w"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbl1v16qi (__tab, (int8x16_t) __idx);
>  }
>  
>  __extension__ extern __inline uint8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -vqtbl1q_u8 (uint8x16_t __a, uint8x16_t __b)
> +vqtbl1q_u8 (uint8x16_t __tab, uint8x16_t __idx)
>  {
> -  uint8x16_t __result;
> -  __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
> -           : "=w"(__result)
> -           : "w"(__a), "w"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbl1v16qi_uuu (__tab, __idx);
>  }
>  
>  __extension__ extern __inline int8x8_t
> @@ -9719,78 +9691,53 @@ __extension__ extern __inline int8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vtbl1_s8 (int8x8_t __tab, int8x8_t __idx)
>  {
> -  int8x8_t __result;
> -  int8x16_t __temp = vcombine_s8 (__tab, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
> -  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
> -           : "=w"(__result)
> -           : "w"(__temp), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  int8x16_t __temp = vcombine_s8 (__tab,
> +                               vcreate_s8 (__AARCH64_UINT64_C (0x0)));
> +  return __builtin_aarch64_tbl1v8qi (__temp, __idx);
>  }
>  
>  __extension__ extern __inline uint8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vtbl1_u8 (uint8x8_t __tab, uint8x8_t __idx)
>  {
> -  uint8x8_t __result;
> -  uint8x16_t __temp = vcombine_u8 (__tab, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
> -  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
> -           : "=w"(__result)
> -           : "w"(__temp), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  uint8x16_t __temp = vcombine_u8 (__tab,
> +                                vcreate_u8 (__AARCH64_UINT64_C (0x0)));
> +  return __builtin_aarch64_tbl1v8qi_uuu (__temp, __idx);
>  }
>  
>  __extension__ extern __inline poly8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vtbl1_p8 (poly8x8_t __tab, uint8x8_t __idx)
>  {
> -  poly8x8_t __result;
> -  poly8x16_t __temp = vcombine_p8 (__tab, vcreate_p8 (__AARCH64_UINT64_C (0x0)));
> -  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
> -           : "=w"(__result)
> -           : "w"(__temp), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  poly8x16_t __temp = vcombine_p8 (__tab,
> +                                vcreate_p8 (__AARCH64_UINT64_C (0x0)));
> +  return (poly8x8_t) __builtin_aarch64_tbl1v8qi ((int8x16_t) __temp,
> +                                              (int8x8_t) __idx);
>  }
>  
>  __extension__ extern __inline int8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vtbl2_s8 (int8x8x2_t __tab, int8x8_t __idx)
>  {
> -  int8x8_t __result;
>    int8x16_t __temp = vcombine_s8 (__tab.val[0], __tab.val[1]);
> -  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
> -           : "=w"(__result)
> -           : "w"(__temp), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbl1v8qi (__temp, __idx);
>  }
>  
>  __extension__ extern __inline uint8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vtbl2_u8 (uint8x8x2_t __tab, uint8x8_t __idx)
>  {
> -  uint8x8_t __result;
>    uint8x16_t __temp = vcombine_u8 (__tab.val[0], __tab.val[1]);
> -  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
> -           : "=w"(__result)
> -           : "w"(__temp), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbl1v8qi_uuu (__temp, __idx);
>  }
>  
>  __extension__ extern __inline poly8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vtbl2_p8 (poly8x8x2_t __tab, uint8x8_t __idx)
>  {
> -  poly8x8_t __result;
>    poly8x16_t __temp = vcombine_p8 (__tab.val[0], __tab.val[1]);
> -  __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
> -           : "=w"(__result)
> -           : "w"(__temp), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return (poly8x8_t) __builtin_aarch64_tbl1v8qi ((int8x16_t) __temp,
> +                                              (int8x8_t) __idx);
>  }
>  
>  __extension__ extern __inline int8x8_t

Reply via email to