> -----Original Message-----
> From: Richard Sandiford <richard.sandif...@arm.com>
> Sent: Friday, October 15, 2021 1:26 PM
> To: Tamar Christina <tamar.christ...@arm.com>
> Cc: gcc-patches@gcc.gnu.org; nd <n...@arm.com>; Richard Earnshaw
> <richard.earns...@arm.com>; Marcus Shawcroft
> <marcus.shawcr...@arm.com>; Kyrylo Tkachov <kyrylo.tkac...@arm.com>
> Subject: Re: [PATCH]AArch64 Lower intrinsics shift to GIMPLE when possible.
> 
> Tamar Christina <tamar.christ...@arm.com> writes:
> > Hi All,
> >
> > This lowers shifts to GIMPLE when the C interpretation of the shift
> > operations matches that of AArch64.
> >
> > In C shifting right by BITSIZE is undefined, but the behavior is
> > defined in AArch64.  Additionally, negative left shifts are undefined
> > in C but defined for the register variants of the instruction (SSHL,
> > USHL) as being right shifts.
> >
> > Since we have a right shift by immediate, I rewrite those cases into
> > right shifts.
> >
> > So:
> >
> > int64x1_t foo3 (int64x1_t a)
> > {
> >   return vshl_s64 (a, vdup_n_s64(-6));
> > }
> >
> > produces:
> >
> > foo3:
> >         sshr    d0, d0, 6
> >         ret
> >
> > instead of:
> >
> > foo3:
> >         mov     x0, -6
> >         fmov    d1, x0
> >         sshl    d0, d0, d1
> >         ret
> >
> > This behavior isn't specifically mentioned for a left shift by
> > immediate, but I believe that's only the case because we do have a
> > right shift by immediate but not a right shift by register.  As such
> > I do the same for left shift by immediate.
> >
> > The testsuite already has various testcases for shifts (vshl.c etc.),
> > so I am not adding overlapping tests here.
> >
> > Out of range shifts like
> >
> > int64x1_t foo3 (int64x1_t a)
> > {
> >   return vshl_s64 (a, vdup_n_s64(80));
> > }
> >
> > now get optimized to 0 as well, along with shifts that are undefined
> > behavior in both C and AArch64.
> 
> The SSHL results are well-defined for all shift amounts, so we shouldn't
> convert them to undefined gimple, even as a temporary step.  E.g.:
> 
> int32x4_t foo(int32x4_t x) {
>   return vshlq_s32(x, vdupq_n_s32(256));
> }
> 
> should fold to “x” (if we fold it at all).  Similarly:
> 
> int32x4_t foo(int32x4_t x) {
>   return vshlq_s32(x, vdupq_n_s32(257));
> }
> 
> should fold to x << 1 (again if we fold it at all).
> 
> For a shift right:
> 
> int32x4_t foo(int32x4_t x) {
>   return vshlq_s32(x, vdupq_n_s32(-64));
> }
> 
> is equivalent to:
> 
> int32x4_t foo(int32x4_t x) {
>   return vshrq_n_s32(x, 31);
> }
> 
> and so it shouldn't fold to 0.

And here I thought I had read the specs very carefully...

I will punt on them because I don't think those ranges are common at all.
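
For the record, the per-element behaviour I'm assuming for SSHL on 32-bit
lanes is roughly the sketch below (this is just my reading of the
description above, not a quote from the Arm ARM; USHL is analogous with
logical right shifts):

#include <stdint.h>

static int32_t
sshl_elem (int32_t x, int32_t amount)
{
  /* Only the least significant byte of the shift operand is used,
     interpreted as a signed quantity.  */
  int shift = (int8_t) amount;
  if (shift >= 32)
    return 0;                        /* all bits shifted out */
  if (shift >= 0)
    return (int32_t) ((uint32_t) x << shift);
  if (shift <= -32)
    return x >> 31;                  /* only copies of the sign bit remain */
  return x >> -shift;                /* a negative amount is a right shift */
}

The fold in the patch only fires when the absolute value of the constant
amount is below the element size, so the saturating and byte-wrapping cases
above are left alone.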


Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.
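
For anyone who wants to eyeball the new lowering, one quick way (the exact
dump text will differ between revisions) is to compile a small example such
as the one below with -O1 -fdump-tree-optimized and check that the
optimized dump shows a plain arithmetic right shift by 6 instead of a call
to the sshl builtin:

#include <arm_neon.h>

int64x1_t
foo3 (int64x1_t a)
{
  return vshl_s64 (a, vdup_n_s64 (-6));
}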

Ok for master?

--- inline copy of patch ---

diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index f6b41d9c200d6300dee65ba60ae94488231a8a38..568775cb8effaf51a692ba12af99e9865d2cf8a3 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -2394,6 +2394,68 @@ aarch64_general_gimple_fold_builtin (unsigned int fcode, gcall *stmt)
                                               1, args[0]);
        gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
        break;
+      BUILTIN_VSDQ_I_DI (BINOP, ashl, 3, NONE)
+       {
+         tree cst = args[1];
+         tree ctype = TREE_TYPE (cst);
+         if (INTEGRAL_TYPE_P (ctype)
+             && TREE_CODE (cst) == INTEGER_CST)
+           {
+             wide_int wcst = wi::to_wide (cst);
+             if (wi::neg_p (wcst, TYPE_SIGN (ctype)))
+               new_stmt =
+                 gimple_build_assign (gimple_call_lhs (stmt),
+                                      RSHIFT_EXPR, args[0],
+                                      wide_int_to_tree (ctype,
+                                                        wi::abs (wcst)));
+             else
+               new_stmt =
+                 gimple_build_assign (gimple_call_lhs (stmt),
+                                      LSHIFT_EXPR, args[0], args[1]);
+           }
+       }
+       break;
+      BUILTIN_VSDQ_I_DI (BINOP, sshl, 0, NONE)
+      BUILTIN_VSDQ_I_DI (BINOP_UUS, ushl, 0, NONE)
+       {
+         tree cst = args[1];
+         tree ctype = TREE_TYPE (cst);
+         HOST_WIDE_INT bits = GET_MODE_UNIT_BITSIZE (TYPE_MODE (TREE_TYPE (args[0])));
+         if (INTEGRAL_TYPE_P (ctype)
+             && TREE_CODE (cst) == INTEGER_CST)
+           {
+             wide_int wcst = wi::to_wide (cst);
+             wide_int abs_cst = wi::abs (wcst);
+             if (wi::geu_p (abs_cst, bits))
+               break;
+
+             if (wi::neg_p (wcst, TYPE_SIGN (ctype)))
+               new_stmt =
+                 gimple_build_assign (gimple_call_lhs (stmt),
+                                      RSHIFT_EXPR, args[0],
+                                      wide_int_to_tree (ctype, abs_cst));
+             else
+               new_stmt =
+                 gimple_build_assign (gimple_call_lhs (stmt),
+                                      LSHIFT_EXPR, args[0], args[1]);
+           }
+       }
+       break;
+      BUILTIN_VDQ_I (SHIFTIMM, ashr, 3, NONE)
+      VAR1 (SHIFTIMM, ashr_simd, 0, NONE, di)
+      BUILTIN_VDQ_I (USHIFTIMM, lshr, 3, NONE)
+      VAR1 (USHIFTIMM, lshr_simd, 0, NONE, di)
+       {
+         tree cst = args[1];
+         tree ctype = TREE_TYPE (cst);
+         HOST_WIDE_INT bits = GET_MODE_UNIT_BITSIZE (TYPE_MODE (TREE_TYPE (args[0])));
+         if (INTEGRAL_TYPE_P (ctype)
+             && TREE_CODE (cst) == INTEGER_CST
+             && wi::ne_p (wi::to_wide (cst), bits))
+           new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
+                                           RSHIFT_EXPR, args[0], args[1]);
+       }
+       break;
       BUILTIN_GPF (BINOP, fmulx, 0, ALL)
        {
          gcc_assert (nargs == 2);
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 402453aa9bba5949da43c984c4603196b1efd092..bbe0a4a3c4aea4187e7b1a9f10ab60e79df7b138 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -409,7 +409,7 @@
 
   BUILTIN_VDQ_I (SHIFTIMM, ashr, 3, NONE)
   VAR1 (SHIFTIMM, ashr_simd, 0, NONE, di)
-  BUILTIN_VDQ_I (SHIFTIMM, lshr, 3, NONE)
+  BUILTIN_VDQ_I (USHIFTIMM, lshr, 3, NONE)
   VAR1 (USHIFTIMM, lshr_simd, 0, NONE, di)
   /* Implemented by aarch64_<sur>shr_n<mode>.  */
   BUILTIN_VSDQ_I_DI (SHIFTIMM, srshr_n, 0, NONE)
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 635a223b59eb0f64304351939d444411b697af81..c4ef5f7f7e3658c830893931ef5a874842410e10 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -27400,21 +27400,21 @@ __extension__ extern __inline uint8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshr_n_u8 (uint8x8_t __a, const int __b)
 {
-  return (uint8x8_t) __builtin_aarch64_lshrv8qi ((int8x8_t) __a, __b);
+  return __builtin_aarch64_lshrv8qi_uus (__a, __b);
 }
 
 __extension__ extern __inline uint16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshr_n_u16 (uint16x4_t __a, const int __b)
 {
-  return (uint16x4_t) __builtin_aarch64_lshrv4hi ((int16x4_t) __a, __b);
+  return __builtin_aarch64_lshrv4hi_uus (__a, __b);
 }
 
 __extension__ extern __inline uint32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshr_n_u32 (uint32x2_t __a, const int __b)
 {
-  return (uint32x2_t) __builtin_aarch64_lshrv2si ((int32x2_t) __a, __b);
+  return __builtin_aarch64_lshrv2si_uus (__a, __b);
 }
 
 __extension__ extern __inline uint64x1_t
@@ -27456,28 +27456,28 @@ __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrq_n_u8 (uint8x16_t __a, const int __b)
 {
-  return (uint8x16_t) __builtin_aarch64_lshrv16qi ((int8x16_t) __a, __b);
+  return __builtin_aarch64_lshrv16qi_uus (__a, __b);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrq_n_u16 (uint16x8_t __a, const int __b)
 {
-  return (uint16x8_t) __builtin_aarch64_lshrv8hi ((int16x8_t) __a, __b);
+  return __builtin_aarch64_lshrv8hi_uus (__a, __b);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrq_n_u32 (uint32x4_t __a, const int __b)
 {
-  return (uint32x4_t) __builtin_aarch64_lshrv4si ((int32x4_t) __a, __b);
+  return __builtin_aarch64_lshrv4si_uus (__a, __b);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrq_n_u64 (uint64x2_t __a, const int __b)
 {
-  return (uint64x2_t) __builtin_aarch64_lshrv2di ((int64x2_t) __a, __b);
+  return __builtin_aarch64_lshrv2di_uus (__a, __b);
 }
 
 __extension__ extern __inline int64_t
diff --git a/gcc/testsuite/gcc.target/aarch64/signbit-2.c b/gcc/testsuite/gcc.target/aarch64/signbit-2.c
new file mode 100644
index 0000000000000000000000000000000000000000..e4e9afc854317cb599fa8118a1117c5a52e6f497
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/signbit-2.c
@@ -0,0 +1,36 @@
+/* { dg-do assemble } */
+/* { dg-options "-O1 --save-temps" } */
+
+#include <arm_neon.h>
+
+int32x2_t foo1 (int32x2_t a)
+{
+  return vshr_n_s32 (vneg_s32 (a), 31);
+}
+
+int32x4_t foo2 (int32x4_t a)
+{
+  return vshrq_n_s32 (vnegq_s32 (a), 31);
+}
+
+int16x8_t foo3 (int16x8_t a)
+{
+  return vshrq_n_s16 (vnegq_s16 (a), 15);
+}
+
+int16x4_t foo4 (int16x4_t a)
+{
+  return vshr_n_s16 (vneg_s16 (a), 15);
+}
+
+int8x16_t foo5 (int8x16_t a)
+{
+  return vshrq_n_s8 (vnegq_s8 (a), 7);
+}
+
+int8x8_t foo6 (int8x8_t a)
+{
+  return vshr_n_s8 (vneg_s8 (a), 7);
+}
+
+/* { dg-final { scan-assembler-times {\tcmgt\t} 6 } } */
