Hi!

The following testcase is miscompiled since r14-8680 PR113560 changes.
I've already tried to fix some of the issues caused by that change in
r14-8823 PR113759, but apparently didn't get it right.

The problem is that the r14-8680 changes sometimes set *type_out to
a narrower type than *new_rhs_out actually has (they handle patterns
like _1 = rhs1 & 0xffff; and infer a HImode type_out from that).
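
Roughly, for the testcase below the pattern matching sees GIMPLE like
(illustrative SSA names, not copied from a dump):
  _1 = v.0_2 & 65535;   /* unsigned int, SImode */
  _3 = _1 * 2;
and the r14-8680 code looks through the & 65535, returning the unmasked
v.0_2 (SImode) as *new_rhs_out but a HImode *type_out, relying on a later
narrowing cast to reinstate the masking.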

Now, if convert_mult_to_widen or convert_plusminus_to_widen actually
gets an optab for the modes we've asked for (i.e. from_mode and to_mode),
everything works fine: if the operands don't have the expected types,
they are converted to them (INTEGER_CSTs with fold_convert, everything
else with build_and_insert_cast).
On the following testcase on aarch64 that is not the case: we ask
for from_mode HImode and to_mode DImode, but get actual_mode SImode.
The mult_rhs1 operand already has SImode, so after we change type1 to
unsigned int no cast is actually emitted, and the & 0xffff is lost that way.
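
In C terms (a sketch of the effect, not the actual generated code) the
widening multiply-add then computes
  a = a + (unsigned long long) v * 2;              /* & 0xffff lost */
instead of the intended
  a = a + (unsigned long long) (v & 0xFFFF) * 2;
so for v == 0x10000 it adds 0x20000 instead of 0.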

The following patch ensures that if we change typeN because of a wider
actual_mode (or because of a sign change), we first cast to the old
typeN (which only differs from the operand's type if the r14-8680 code
was triggered; otherwise it has the same precision), only then change
typeN, and then perhaps cast again.
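
For the testcase that means (again just a sketch with illustrative SSA
names) first emitting
  _3 = (unsigned short) v.0_2;  /* cast to the old HImode type1,
                                   performs the & 0xffff */
  _4 = (unsigned int) _3;       /* cast to the new actual_mode type1 */
and only then forming the widening multiply-add from _4.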

Bootstrapped/regtested on x86_64-linux and i686-linux and tested on the
testcase with a cross to aarch64-linux, where it results in the expected
-       add     x19, x19, w0, uxtw 1
+       add     x19, x19, w0, uxth 1
difference.  Ok for trunk?

2025-03-26  Jakub Jelinek  <ja...@redhat.com>

        PR tree-optimization/119417
        * tree-ssa-math-opts.cc (convert_mult_to_widen): Before changing
        typeN because actual_precision/from_unsignedN differs, cast rhsN
        to typeN if it has a different type.
        (convert_plusminus_to_widen): Before changing typeN because
        actual_precision/from_unsignedN differs, cast mult_rhsN to typeN
        if it has a different type.

        * gcc.dg/torture/pr119417.c: New test.

--- gcc/tree-ssa-math-opts.cc.jj        2025-01-02 11:23:15.939524919 +0100
+++ gcc/tree-ssa-math-opts.cc   2025-03-25 15:56:34.171411578 +0100
@@ -2800,7 +2800,17 @@ convert_mult_to_widen (gimple *stmt, gim
     return false;
   if (actual_precision != TYPE_PRECISION (type1)
       || from_unsigned1 != TYPE_UNSIGNED (type1))
-    type1 = build_nonstandard_integer_type (actual_precision, from_unsigned1);
+    {
+      if (!useless_type_conversion_p (type1, TREE_TYPE (rhs1)))
+       {
+         if (TREE_CODE (rhs1) == INTEGER_CST)
+           rhs1 = fold_convert (type1, rhs1);
+         else
+           rhs1 = build_and_insert_cast (gsi, loc, type1, rhs1);
+       }
+      type1 = build_nonstandard_integer_type (actual_precision,
+                                             from_unsigned1);
+    }
   if (!useless_type_conversion_p (type1, TREE_TYPE (rhs1)))
     {
       if (TREE_CODE (rhs1) == INTEGER_CST)
@@ -2810,7 +2820,17 @@ convert_mult_to_widen (gimple *stmt, gim
     }
   if (actual_precision != TYPE_PRECISION (type2)
       || from_unsigned2 != TYPE_UNSIGNED (type2))
-    type2 = build_nonstandard_integer_type (actual_precision, from_unsigned2);
+    {
+      if (!useless_type_conversion_p (type2, TREE_TYPE (rhs2)))
+       {
+         if (TREE_CODE (rhs2) == INTEGER_CST)
+           rhs2 = fold_convert (type2, rhs2);
+         else
+           rhs2 = build_and_insert_cast (gsi, loc, type2, rhs2);
+       }
+      type2 = build_nonstandard_integer_type (actual_precision,
+                                             from_unsigned2);
+    }
   if (!useless_type_conversion_p (type2, TREE_TYPE (rhs2)))
     {
       if (TREE_CODE (rhs2) == INTEGER_CST)
@@ -3021,7 +3041,17 @@ convert_plusminus_to_widen (gimple_stmt_
   actual_precision = GET_MODE_PRECISION (actual_mode);
   if (actual_precision != TYPE_PRECISION (type1)
       || from_unsigned1 != TYPE_UNSIGNED (type1))
-    type1 = build_nonstandard_integer_type (actual_precision, from_unsigned1);
+    {
+      if (!useless_type_conversion_p (type1, TREE_TYPE (mult_rhs1)))
+       {
+         if (TREE_CODE (mult_rhs1) == INTEGER_CST)
+           mult_rhs1 = fold_convert (type1, mult_rhs1);
+         else
+           mult_rhs1 = build_and_insert_cast (gsi, loc, type1, mult_rhs1);
+       }
+      type1 = build_nonstandard_integer_type (actual_precision,
+                                             from_unsigned1);
+    }
   if (!useless_type_conversion_p (type1, TREE_TYPE (mult_rhs1)))
     {
       if (TREE_CODE (mult_rhs1) == INTEGER_CST)
@@ -3031,7 +3061,17 @@ convert_plusminus_to_widen (gimple_stmt_
     }
   if (actual_precision != TYPE_PRECISION (type2)
       || from_unsigned2 != TYPE_UNSIGNED (type2))
-    type2 = build_nonstandard_integer_type (actual_precision, from_unsigned2);
+    {
+      if (!useless_type_conversion_p (type2, TREE_TYPE (mult_rhs2)))
+       {
+         if (TREE_CODE (mult_rhs2) == INTEGER_CST)
+           mult_rhs2 = fold_convert (type2, mult_rhs2);
+         else
+           mult_rhs2 = build_and_insert_cast (gsi, loc, type2, mult_rhs2);
+       }
+      type2 = build_nonstandard_integer_type (actual_precision,
+                                             from_unsigned2);
+    }
   if (!useless_type_conversion_p (type2, TREE_TYPE (mult_rhs2)))
     {
       if (TREE_CODE (mult_rhs2) == INTEGER_CST)
--- gcc/testsuite/gcc.dg/torture/pr119417.c.jj  2025-03-25 16:03:42.895646634 +0100
+++ gcc/testsuite/gcc.dg/torture/pr119417.c     2025-03-25 16:03:18.363976613 +0100
@@ -0,0 +1,24 @@
+/* PR tree-optimization/119417 */
+/* { dg-do run { target int32 } } */
+
+__attribute__((noipa)) void
+foo (unsigned long long x)
+{
+  if (x != 0)
+    __builtin_abort ();
+}
+
+unsigned v = 0x10000;
+
+int
+main ()
+{
+  unsigned long long a = 0;
+  while (1)
+    {
+      a = a + ((v & 0xFFFF) * 2);
+      foo (a);
+      if (v)
+       break;
+    }
+}

        Jakub
