On Tue, Jun 14, 2011 at 11:49:08AM +0200, Richard Guenther wrote:
> So I'd say we revert your patch for now and if somebody feels like
> implementing the above ...

OK, here is what I've bootstrapped/regtested on x86_64-linux and i686-linux
and committed to trunk and the 4.6 branch:

2011-06-14  Jakub Jelinek  <ja...@redhat.com>

        PR rtl-optimization/49390
        Revert:
        2010-06-29  Bernd Schmidt  <ber...@codesourcery.com>

        * cse.c (exp_equiv_p): For MEMs, if for_gcse, only compare
        MEM_ALIAS_SET.

        * gcc.c-torture/execute/pr49390.c: New test.

--- gcc/cse.c.jj        (revision 161534)
+++ gcc/cse.c   (revision 161533)
@@ -2669,16 +2669,26 @@
     case MEM:
       if (for_gcse)
        {
-         /* Can't merge two expressions in different alias sets, since we
-            can decide that the expression is transparent in a block when
-            it isn't, due to it being set with the different alias set.  */
-         if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
-           return 0;
-
          /* A volatile mem should not be considered equivalent to any
             other.  */
          if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
            return 0;
+
+         /* Can't merge two expressions in different alias sets, since we
+            can decide that the expression is transparent in a block when
+            it isn't, due to it being set with the different alias set.
+
+            Also, can't merge two expressions with different MEM_ATTRS.
+            They could e.g. be two different entities allocated into the
+            same space on the stack (see e.g. PR25130).  In that case, the
+            MEM addresses can be the same, even though the two MEMs are
+            absolutely not equivalent.
+
+            But because really all MEM attributes should be the same for
+            equivalent MEMs, we just use the invariant that MEMs that have
+            the same attributes share the same mem_attrs data structure.  */
+         if (MEM_ATTRS (x) != MEM_ATTRS (y))
+           return 0;
        }
       break;
 
--- gcc/testsuite/gcc.c-torture/execute/pr49390.c.jj    2011-06-13 17:28:09.000000000 +0200
+++ gcc/testsuite/gcc.c-torture/execute/pr49390.c       2011-06-13 17:27:49.000000000 +0200
@@ -0,0 +1,88 @@
+/* PR rtl-optimization/49390 */
+
+struct S { unsigned int s1; unsigned int s2; };
+struct T { unsigned int t1; struct S t2; };
+struct U { unsigned short u1; unsigned short u2; };
+struct V { struct U v1; struct T v2; };
+struct S a;
+char *b;
+union { char b[64]; struct V v; } u;
+volatile int v;
+extern void abort (void);
+
+__attribute__((noinline, noclone)) void
+foo (int x, void *y, unsigned int z, unsigned int w)
+{
+  if (x != 4 || y != (void *) &u.v.v2)
+    abort ();
+  v = z + w;
+  v = 16384;
+}
+
+__attribute__((noinline, noclone)) void
+bar (struct S x)
+{
+  v = x.s1;
+  v = x.s2;
+}
+
+__attribute__((noinline, noclone)) int
+baz (struct S *x)
+{
+  v = x->s1;
+  v = x->s2;
+  v = 0;
+  return v + 1;
+}
+
+__attribute__((noinline, noclone)) void
+test (struct S *c)
+{
+  struct T *d;
+  struct S e = a;
+  unsigned int f, g;
+  if (c == 0)
+    c = &e;
+  else
+    {
+      if (c->s2 % 8192 <= 15 || (8192 - c->s2 % 8192) <= 31)
+       foo (1, 0, c->s1, c->s2);
+    }
+  if (!baz (c))
+    return;
+  g = (((struct U *) b)->u2 & 2) ? 32 : __builtin_offsetof (struct V, v2);
+  f = c->s2 % 8192;
+  if (f == 0)
+    {
+      e.s2 += g;
+      f = g;
+    }
+  else if (f < g)
+    {
+      foo (2, 0, c->s1, c->s2);
+      return;
+    }
+  if ((((struct U *) b)->u2 & 1) && f == g)
+    {
+      bar (*c);
+      foo (3, 0, c->s1, c->s2);
+      return;
+    }
+  d = (struct T *) (b + c->s2 % 8192);
+  if (d->t2.s1 >= c->s1 && (d->t2.s1 != c->s1 || d->t2.s2 >= c->s2))
+    foo (4, d, c->s1, c->s2);
+  return;
+}
+
+int
+main ()
+{
+  struct S *c = 0;
+  asm ("" : "+r" (c) : "r" (&a));
+  u.v.v2.t2.s1 = 8192;
+  b = u.b;
+  test (c);
+  if (v != 16384)
+    abort ();
+  return 0;
+}
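
For reference, here is a minimal C sketch (not part of the patch; sum_two_blocks,
a and b are made-up names) of the kind of situation the restored comment in
exp_equiv_p describes: two distinct objects with non-overlapping lifetimes may be
assigned the same stack slot, so their MEMs can end up with identical addresses
and identical alias sets while still denoting different entities.  Whether the
slot is actually shared of course depends on the target and optimization level.

#include <stdio.h>

static int
sum_two_blocks (int n)
{
  int total = 0;
  {
    int a[4] = { n, n + 1, n + 2, n + 3 };      /* first entity */
    total += a[0] + a[3];
  }
  {
    /* b's lifetime does not overlap a's, so the compiler may reuse a's
       stack slot for b: same address and same alias set (int), but a
       different MEM_EXPR and therefore a different mem_attrs object.  */
    int b[4] = { n * 2, 0, 0, n * 5 };          /* second entity */
    total += b[0] + b[3];
  }
  return total;
}

int
main (void)
{
  printf ("%d\n", sum_two_blocks (5));
  return 0;
}

With only the MEM_ALIAS_SET comparison, GCSE could consider loads from a and b
equivalent when their addresses match; comparing the MEM_ATTRS pointers keeps
them apart, since MEMs with identical attributes share a single mem_attrs
structure.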

        Jakub
