https://gcc.gnu.org/g:c9f76617dd50d4f95e38461b81d17c826d8f48ec

commit c9f76617dd50d4f95e38461b81d17c826d8f48ec
Author: Alexandre Oliva <ol...@adacore.com>
Date:   Thu Jan 9 03:05:41 2025 -0300

    [ifcombine] adjust for narrowing converts before shifts
    
    A narrowing conversion and a right shift both drop bits from the
    loaded value, but we need to take into account which one comes first
    to compute the correct number of bits and the correct mask.
    
    
    for  gcc/ChangeLog
    
            PR tree-optimization/118206
            * gimple-fold.cc (decode_field_reference): Account for upper
            bits dropped by narrowing conversions whether before or after
            a right shift.
    
    for  gcc/testsuite/ChangeLog
    
            PR tree-optimization/118206
            * gcc.dg/field-merge-18.c: New.
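
To illustrate the distinction the patch addresses (an editorial sketch,
not part of the commit): casting to a narrower type before shifting
selects a different bit range than shifting before casting, even though
both operations drop bits from a 16-bit load.

    /* Standalone illustration; mirrors the foo/bar pattern in the new test.  */
    #include <assert.h>

    int
    main (void)
    {
      unsigned short b = 0xff78;

      /* Narrow first, then shift: keeps bits [4,8) of b, i.e. 0x7.  */
      unsigned convert_then_shift = ((unsigned char) b) >> 4;

      /* Shift first, then narrow: keeps bits [4,12) of b, i.e. 0xf7.  */
      unsigned shift_then_convert = (unsigned char) (b >> 4);

      assert (convert_then_shift == 0x7);
      assert (shift_then_convert == 0xf7);
      return 0;
    }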

Diff:
---
 gcc/gimple-fold.cc                    | 19 ++++++++++++++-
 gcc/testsuite/gcc.dg/field-merge-18.c | 45 +++++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+), 1 deletion(-)

diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index c8a726e0ae3f..a01f76cd66f5 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -7547,6 +7547,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
   int shiftrt = 0;
   tree res_ops[2];
   machine_mode mode;
+  bool convert_before_shift = false;
 
   *load = NULL;
   *psignbit = false;
@@ -7651,6 +7652,12 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
       if (*load)
        loc[3] = gimple_location (*load);
       exp = res_ops[0];
+      /* This looks backwards, but we're going back the def chain, so if we
+        find the conversion here, after finding a shift, that's because the
+        convert appears before the shift, and we should thus adjust the bit
+        pos and size because of the shift after adjusting it due to type
+        conversion.  */
+      convert_before_shift = true;
     }
 
   /* Identify the load, if there is one.  */
@@ -7693,6 +7700,15 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
   *pvolatilep = volatilep;
 
   /* Adjust shifts...  */
+  if (convert_before_shift
+      && outer_type && *pbitsize > TYPE_PRECISION (outer_type))
+    {
+      HOST_WIDE_INT excess = *pbitsize - TYPE_PRECISION (outer_type);
+      if (*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
+       *pbitpos += excess;
+      *pbitsize -= excess;
+    }
+
   if (shiftrt)
     {
       if (!*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
@@ -7701,7 +7717,8 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
     }
 
   /* ... and bit position.  */
-  if (outer_type && *pbitsize > TYPE_PRECISION (outer_type))
+  if (!convert_before_shift
+      && outer_type && *pbitsize > TYPE_PRECISION (outer_type))
     {
       HOST_WIDE_INT excess = *pbitsize - TYPE_PRECISION (outer_type);
       if (*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
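
Editorial note, not part of the commit: a concrete trace of why the
ordering matters.  Assume a little-endian, non-reversed 16-bit load at
bit position 0, narrowed to an 8-bit unsigned char and shifted right by
4, and assume the elided shift-adjustment code adds shiftrt to *pbitpos
(on little-endian) and subtracts it from *pbitsize.

    convert before shift, new ordering (foo's shape in the test below):
      excess = 16 - 8 = 8  ->  *pbitsize = 8, *pbitpos = 0
      shiftrt = 4          ->  *pbitsize = 4, *pbitpos = 4
      field selected: bits [4,8), matching ((unsigned char) b) >> 4

    the previous unconditional ordering applied the shift first:
      shiftrt = 4          ->  *pbitsize = 12, *pbitpos = 4
      excess = 12 - 8 = 4  ->  *pbitsize = 8,  *pbitpos = 4
      field selected: bits [4,12), i.e. what (unsigned char)(b >> 4)
      selects, so the two shapes were wrongly treated as the same field.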
diff --git a/gcc/testsuite/gcc.dg/field-merge-18.c b/gcc/testsuite/gcc.dg/field-merge-18.c
new file mode 100644
index 000000000000..06857ca8d773
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/field-merge-18.c
@@ -0,0 +1,45 @@
+/* { dg-do run } */
+/* { dg-options "-O1" } */
+
+/* PR tree-optimization/118206 */
+/* Check that shifts, whether before or after narrowing conversions, mask out
+   the bits that are to be discarded.  */
+
+__attribute__((noipa)) int
+foo (const void *x)
+{
+  unsigned short b;
+  __builtin_memcpy (&b, x, sizeof (short));
+  if ((b & 15) != 8)
+    return 1;
+  if ((((unsigned char) b) >> 4) > 7)
+    return 1;
+  return 0;
+}
+
+__attribute__((noipa)) int
+bar (const void *x)
+{
+  unsigned short b;
+  __builtin_memcpy (&b, x, sizeof (short));
+  if ((b & 15) != 8)
+    return 1;
+  if ((unsigned char)(b >> 4) > 7)
+    return 1;
+  return 0;
+}
+
+int
+main ()
+{
+  short a = 0xff78;
+  if (foo (&a) != 0 || bar (&a) != 1)
+    __builtin_abort ();
+  short b = 0x88;
+  if (foo (&b) != 1 || bar (&b) != 1)
+    __builtin_abort ();
+  short c = 8;
+  if (foo (&c) != 0 || bar (&c) != 0)
+    __builtin_abort ();
+  return 0;
+}
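
Editorial worked example (not part of the commit) for the first test
value, a = 0xff78, where the two functions must differ:

    foo: b & 15 = 8                         -> first check passes
         (unsigned char) b = 0x78
         0x78 >> 4 = 7, not > 7             -> foo returns 0
    bar: b & 15 = 8                         -> first check passes
         b >> 4 = 0xff7
         (unsigned char) 0xff7 = 0xf7 = 247 -> 247 > 7, bar returns 1

Hence main expects foo (&a) == 0 but bar (&a) == 1.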
