https://gcc.gnu.org/g:23364e8369a264e58a051ea74a9077eef3d27ae6

commit 23364e8369a264e58a051ea74a9077eef3d27ae6
Author: Alexandre Oliva <ol...@adacore.com>
Date:   Mon Jan 13 14:53:25 2025 -0300

    [ifcombine] check and extend constants to compare with bitfields
    
    Add logic to check and extend constants compared with bitfields, so
    that fields are only compared with constants they could actually
    equal.  This involves making sure the signedness doesn't change
    between the load and any conversions that precede shifts: otherwise
    we'd need to carry a lot more data to deal with all the possibilities.
    
    (IIRC we had logic that implicitly dealt with that when compare
    constants were represented as typed trees, but after the rewrite to
    represent them as wide_ints, the logic seemed superfluous.  It wasn't.)
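
    A minimal sketch of the extension check being added, in plain C with a
    fixed 64-bit carrier instead of wide_int (const_fits_field is a
    hypothetical helper for illustration, not GCC code): a constant may only
    be clipped to the field's width if its discarded upper bits are the sign
    or zero extension, as appropriate, of the value that remains.

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Illustration of the wi::zext/wi::sext test performed before
           clipping l_const/r_const: can the full-width constant CST equal
           any value that a WIDTH-bit field would extend to, given the
           field's signedness UNSIGNEDP?  */
        static bool
        const_fits_field (uint64_t cst, unsigned width, bool unsignedp)
        {
          uint64_t low_mask = width >= 64 ? ~UINT64_C (0)
                                          : (UINT64_C (1) << width) - 1;
          uint64_t zext = cst & low_mask;              /* zero-extend to 64 bits */
          uint64_t sext = (zext & (UINT64_C (1) << (width - 1)))
                          ? zext | ~low_mask : zext;   /* sign-extend to 64 bits */
          return (unsignedp ? zext : sext) == cst;
        }

        int
        main (void)
        {
          /* -27 carries sign-extension bits above an 8-bit field: a signed
             field could equal it, an unsigned one never could.  */
          printf ("%d %d\n",
                  const_fits_field ((uint64_t) (int64_t) -27, 8, false),  /* 1 */
                  const_fits_field ((uint64_t) (int64_t) -27, 8, true));  /* 0 */
          return 0;
        }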
    
    While at it, I've robustified decode_field_reference to use local
    variables throughout, to modify the out parms only when we're about
    to return non-NULL, and to drop the unused case of a NULL pand_mask,
    which had a latent failure to detect signbit masking.
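
    A minimal sketch of that out-parameter discipline (generic illustration
    with made-up names, not the actual decode_field_reference signature):
    compute into locals, take early exits without side effects, and write
    the caller-visible out parms only on the path that is about to succeed.

        #include <stdbool.h>

        /* Hypothetical helper: any early return leaves *out_a and *out_b
           untouched; they are written only just before reporting success.  */
        static bool
        decode_something (int in, int *out_a, int *out_b)
        {
          int a = in * 2;      /* work entirely in local variables */
          int b = in + 1;

          if (a > 100)         /* punt: out parms still unmodified */
            return false;

          *out_a = a;          /* commit only when about to return success */
          *out_b = b;
          return true;
        }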
    
    
    for  gcc/ChangeLog
    
            PR tree-optimization/118456
            * gimple-fold.cc (decode_field_reference): Punt if shifting
            after changing signedness.  Robustify to set out parms only
            when returning non-NULL.
            (fold_truth_andor_for_ifcombine): Check extension bits in
            constants before clipping.  Add complementary assert on
            r_const's not being set when l_const isn't.
    
    for  gcc/testsuite/ChangeLog
    
            PR tree-optimization/118456
            * gcc.dg/field-merge-21.c: New.
            * gcc.dg/field-merge-22.c: New.

Diff:
---
 gcc/gimple-fold.cc                    | 153 +++++++++++++++++++++-------------
 gcc/testsuite/gcc.dg/field-merge-21.c |  52 ++++++++++++
 gcc/testsuite/gcc.dg/field-merge-22.c |  30 +++++++
 3 files changed, 179 insertions(+), 56 deletions(-)

diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index 93ed8b3abb05..60d40b1b0d04 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -7510,8 +7510,7 @@ gimple_binop_def_p (enum tree_code code, tree t, tree op[2])
    *PREVERSEP is set to the storage order of the field.
 
    *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.  If
-   PAND_MASK *is NULL, BIT_AND_EXPR is not recognized.  If *PAND_MASK
-   is initially set to a mask with nonzero precision, that mask is
+   *PAND_MASK is initially set to a mask with nonzero precision, that mask is
    combined with the found mask, or adjusted in precision to match.
 
    *PSIGNBIT is set to TRUE if, before clipping to *PBITSIZE, the mask
@@ -7539,7 +7538,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
                        bool *punsignedp, bool *preversep, bool *pvolatilep,
                        wide_int *pand_mask, bool *psignbit,
                        bool *xor_p, tree *xor_cmp_op, wide_int *xor_pand_mask,
-                       gimple **load, location_t loc[4])
+                       gimple **loadp, location_t loc[4])
 {
   tree exp = *pexp;
   tree outer_type = 0;
@@ -7549,9 +7548,11 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
   tree res_ops[2];
   machine_mode mode;
   bool convert_before_shift = false;
-
-  *load = NULL;
-  *psignbit = false;
+  bool signbit = false;
+  bool xorp = false;
+  tree xor_op;
+  wide_int xor_mask;
+  gimple *load = NULL;
 
   /* All the optimizations using this function assume integer fields.
      There are problems with FP fields since the type_for_size call
@@ -7576,7 +7577,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
 
   /* Recognize and save a masking operation.  Combine it with an
      incoming mask.  */
-  if (pand_mask && gimple_binop_def_p (BIT_AND_EXPR, exp, res_ops)
+  if (gimple_binop_def_p (BIT_AND_EXPR, exp, res_ops)
       && TREE_CODE (res_ops[1]) == INTEGER_CST)
     {
       loc[1] = gimple_location (SSA_NAME_DEF_STMT (exp));
@@ -7596,7 +7597,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
            and_mask &= wide_int::from (*pand_mask, prec_op, UNSIGNED);
        }
     }
-  else if (pand_mask)
+  else
     and_mask = *pand_mask;
 
   /* Turn (a ^ b) [!]= 0 into a [!]= b.  */
@@ -7615,10 +7616,10 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
        return NULL_TREE;
       else if (integer_zerop (*xor_cmp_op))
        {
-         *xor_p = true;
+         xorp = true;
          exp = res_ops[0];
-         *xor_cmp_op = *pexp;
-         *xor_pand_mask = *pand_mask;
+         xor_op = *pexp;
+         xor_mask = *pand_mask;
        }
     }
 
@@ -7646,12 +7647,12 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
   /* Yet another chance to drop conversions.  This one is allowed to
      match a converting load, subsuming the load identification block
      below.  */
-  if (!outer_type && gimple_convert_def_p (exp, res_ops, load))
+  if (!outer_type && gimple_convert_def_p (exp, res_ops, &load))
     {
       outer_type = TREE_TYPE (exp);
       loc[0] = gimple_location (SSA_NAME_DEF_STMT (exp));
-      if (*load)
-       loc[3] = gimple_location (*load);
+      if (load)
+       loc[3] = gimple_location (load);
       exp = res_ops[0];
       /* This looks backwards, but we're going back the def chain, so if we
         find the conversion here, after finding a shift, that's because the
@@ -7662,14 +7663,13 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
     }
 
   /* Identify the load, if there is one.  */
-  if (!(*load) && TREE_CODE (exp) == SSA_NAME
-      && !SSA_NAME_IS_DEFAULT_DEF (exp))
+  if (!load && TREE_CODE (exp) == SSA_NAME && !SSA_NAME_IS_DEFAULT_DEF (exp))
     {
       gimple *def = SSA_NAME_DEF_STMT (exp);
       if (gimple_assign_load_p (def))
        {
          loc[3] = gimple_location (def);
-         *load = def;
+         load = def;
          exp = gimple_assign_rhs1 (def);
        }
     }
@@ -7694,63 +7694,76 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
          && !type_has_mode_precision_p (TREE_TYPE (inner))))
     return NULL_TREE;
 
-  *pbitsize = bs;
-  *pbitpos = bp;
-  *punsignedp = unsignedp;
-  *preversep = reversep;
-  *pvolatilep = volatilep;
-
   /* Adjust shifts...  */
   if (convert_before_shift
-      && outer_type && *pbitsize > TYPE_PRECISION (outer_type))
+      && outer_type && bs > TYPE_PRECISION (outer_type))
     {
-      HOST_WIDE_INT excess = *pbitsize - TYPE_PRECISION (outer_type);
-      if (*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
-       *pbitpos += excess;
-      *pbitsize -= excess;
+      HOST_WIDE_INT excess = bs - TYPE_PRECISION (outer_type);
+      if (reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
+       bp += excess;
+      bs -= excess;
     }
 
   if (shiftrt)
     {
-      if (!*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
-       *pbitpos += shiftrt;
-      *pbitsize -= shiftrt;
+      /* Punt if we're shifting by more than the loaded bitfield (after
+        adjustment), or if there's a shift after a change of signedness.
+        When comparing this field with a constant, we'll check that the
+        constant is a proper sign- or zero-extension (depending on signedness)
+        of a value that would fit in the selected portion of the bitfield.  A
+        shift after a change of signedness would make the extension
+        non-uniform, and we can't deal with that (yet ???).  See
+        gcc.dg/field-merge-22.c for a test that would go wrong.  */
+      if (bs <= shiftrt
+         || (outer_type && unsignedp != TYPE_UNSIGNED (outer_type)))
+       return NULL_TREE;
+      if (!reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
+       bp += shiftrt;
+      bs -= shiftrt;
     }
 
   /* ... and bit position.  */
   if (!convert_before_shift
-      && outer_type && *pbitsize > TYPE_PRECISION (outer_type))
+      && outer_type && bs > TYPE_PRECISION (outer_type))
     {
-      HOST_WIDE_INT excess = *pbitsize - TYPE_PRECISION (outer_type);
-      if (*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
-       *pbitpos += excess;
-      *pbitsize -= excess;
+      HOST_WIDE_INT excess = bs - TYPE_PRECISION (outer_type);
+      if (reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
+       bp += excess;
+      bs -= excess;
     }
 
-  *pexp = exp;
-
   /* If the number of bits in the reference is the same as the bitsize of
      the outer type, then the outer type gives the signedness. Otherwise
      (in case of a small bitfield) the signedness is unchanged.  */
-  if (outer_type && *pbitsize == TYPE_PRECISION (outer_type))
-    *punsignedp = TYPE_UNSIGNED (outer_type);
+  if (outer_type && bs == TYPE_PRECISION (outer_type))
+    unsignedp = TYPE_UNSIGNED (outer_type);
 
-  if (pand_mask)
+  /* Make the mask the expected width.  */
+  if (and_mask.get_precision () != 0)
     {
-      /* Make the mask the expected width.  */
-      if (and_mask.get_precision () != 0)
-       {
-         /* If the AND_MASK encompasses bits that would be extensions of
-            the sign bit, set *PSIGNBIT.  */
-         if (!unsignedp
-             && and_mask.get_precision () > *pbitsize
-             && (and_mask
-                 & wi::mask (*pbitsize, true, and_mask.get_precision ())) != 0)
-           *psignbit = true;
-         and_mask = wide_int::from (and_mask, *pbitsize, UNSIGNED);
-       }
+      /* If the AND_MASK encompasses bits that would be extensions of
+        the sign bit, set SIGNBIT.  */
+      if (!unsignedp
+         && and_mask.get_precision () > bs
+         && (and_mask & wi::mask (bs, true, and_mask.get_precision ())) != 0)
+       signbit = true;
+      and_mask = wide_int::from (and_mask, bs, UNSIGNED);
+    }
 
-      *pand_mask = and_mask;
+  *pexp = exp;
+  *loadp = load;
+  *pbitsize = bs;
+  *pbitpos = bp;
+  *punsignedp = unsignedp;
+  *preversep = reversep;
+  *pvolatilep = volatilep;
+  *psignbit = signbit;
+  *pand_mask = and_mask;
+  if (xorp)
+    {
+      *xor_p = xorp;
+      *xor_cmp_op = xor_op;
+      *xor_pand_mask = xor_mask;
     }
 
   return inner;
@@ -8512,13 +8525,25 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
      and bit position.  */
   if (l_const.get_precision ())
     {
+      /* Before clipping upper bits of the right-hand operand of the compare,
+        check that they're sign or zero extensions, depending on how the
+        left-hand operand would be extended.  */
+      bool l_non_ext_bits = false;
+      if (ll_bitsize < lr_bitsize)
+       {
+         wide_int zext = wi::zext (l_const, ll_bitsize);
+         if ((ll_unsignedp ? zext : wi::sext (l_const, ll_bitsize)) == l_const)
+           l_const = zext;
+         else
+           l_non_ext_bits = true;
+       }
       /* We're doing bitwise equality tests, so don't bother with sign
         extensions.  */
       l_const = wide_int::from (l_const, lnprec, UNSIGNED);
       if (ll_and_mask.get_precision ())
        l_const &= wide_int::from (ll_and_mask, lnprec, UNSIGNED);
       l_const <<= xll_bitpos;
-      if ((l_const & ~ll_mask) != 0)
+      if (l_non_ext_bits || (l_const & ~ll_mask) != 0)
        {
          warning_at (lloc, OPT_Wtautological_compare,
                      "comparison is always %d", wanted_code == NE_EXPR);
@@ -8530,11 +8555,23 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
         again.  */
       gcc_checking_assert (r_const.get_precision ());
 
+      /* Before clipping upper bits of the right-hand operand of the compare,
+        check that they're sign or zero extensions, depending on how the
+        left-hand operand would be extended.  */
+      bool r_non_ext_bits = false;
+      if (rl_bitsize < rr_bitsize)
+       {
+         wide_int zext = wi::zext (r_const, rl_bitsize);
+         if ((rl_unsignedp ? zext : wi::sext (r_const, rl_bitsize)) == r_const)
+           r_const = zext;
+         else
+           r_non_ext_bits = true;
+       }
       r_const = wide_int::from (r_const, lnprec, UNSIGNED);
       if (rl_and_mask.get_precision ())
        r_const &= wide_int::from (rl_and_mask, lnprec, UNSIGNED);
       r_const <<= xrl_bitpos;
-      if ((r_const & ~rl_mask) != 0)
+      if (r_non_ext_bits || (r_const & ~rl_mask) != 0)
        {
          warning_at (rloc, OPT_Wtautological_compare,
                      "comparison is always %d", wanted_code == NE_EXPR);
@@ -8586,6 +8623,10 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
      storage order mismatch occurs between the left and right sides.  */
   else
     {
+      /* When we set l_const, we also set r_const, so we need not test it
+        again.  */
+      gcc_checking_assert (!r_const.get_precision ());
+
       if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
          || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
          || ll_reversep != lr_reversep
diff --git a/gcc/testsuite/gcc.dg/field-merge-21.c b/gcc/testsuite/gcc.dg/field-merge-21.c
new file mode 100644
index 000000000000..9e380c68c063
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/field-merge-21.c
@@ -0,0 +1,52 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+
+/* PR tree-optimization/118456 */
+/* Check that shifted fields compared with constants compare correctly even
+   if the constant contains sign-extension bits not present in the bit
+   range.  */
+
+struct S { unsigned long long o; unsigned short a, b; } s;
+
+__attribute__((noipa)) int
+foo (void)
+{
+  return ((unsigned char) s.a) >> 3 == 17 && ((signed char) s.b) >> 2 == -27;
+}
+
+__attribute__((noipa)) int
+bar (void)
+{
+  return ((unsigned char) s.a) >> 3 == 17 && ((signed char) s.b) >> 2 == -91;
+}
+
+__attribute__((noipa)) int
+bars (void)
+{
+  return ((unsigned char) s.a) >> 3 == 17 && ((signed char) s.b) >> 2 == 37;
+}
+
+__attribute__((noipa)) int
+baz (void)
+{
+  return ((unsigned char) s.a) >> 3 == 49 && ((signed char) s.b) >> 2 == -27;
+}
+
+__attribute__((noipa)) int
+bazs (void)
+{
+  return ((unsigned char) s.a) >> 3 == (unsigned char) -15 && ((signed char) s.b) >> 2 == -27;
+}
+
+int
+main ()
+{
+  s.a = 17 << 3;
+  s.b = -27u << 2;
+  if (foo () != 1
+      || bar () != 0
+      || bars () != 0
+      || baz () != 0
+      || bazs () != 0)
+    __builtin_abort ();
+}
diff --git a/gcc/testsuite/gcc.dg/field-merge-22.c b/gcc/testsuite/gcc.dg/field-merge-22.c
new file mode 100644
index 000000000000..82eb8447616f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/field-merge-22.c
@@ -0,0 +1,30 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+
+/* PR tree-optimization/118456 */
+/* Check that compares with constants take into account sign/zero extension of
+   both the bitfield and of the shifting type.  */
+
+#define shift (__CHAR_BIT__ - 4)
+
+struct S {
+  signed char a : shift + 2;
+  signed char b : shift + 2;
+  short ignore[0];
+} s;
+
+__attribute__((noipa)) int
+foo (void)
+{
+  return ((unsigned char) s.a) >> shift == 15
+    && ((unsigned char) s.b) >> shift == 0;
+}
+
+int
+main ()
+{
+  s.a = -1;
+  s.b = 1;
+  if (foo () != 1)
+    __builtin_abort ();
+}
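
A worked illustration of the arithmetic behind the new punt in
decode_field_reference (a standalone sketch, not part of the commit): in
field-merge-22.c the 6-bit signed field holding -1 reads back as -1, and the
conversion to unsigned char turns that into 0xff before the shift, so the
shifted result (15) carries copies of the field's sign bit in positions the
field doesn't store.  Under a zero-extension model of the 6-bit field that
result would look impossible, which is why a shift after a change of
signedness is now punted on.

    #include <stdio.h>

    int
    main (void)
    {
      int field_value = -1;   /* what the 6-bit signed bitfield reads back as */
      unsigned char widened = (unsigned char) field_value;  /* 0xff: the sign bits survive */
      printf ("%d\n", widened >> (__CHAR_BIT__ - 4));        /* prints 15 */
      return 0;
    }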
