https://gcc.gnu.org/g:574c59cfe6e29c9e5758988b75c2e7ab6edc37da

commit r15-9149-g574c59cfe6e29c9e5758988b75c2e7ab6edc37da
Author: Richard Biener <rguent...@suse.de>
Date:   Wed Apr 2 13:12:58 2025 +0200

    tree-optimization/119586 - aligned access to unaligned data
    
    The following reverts parts of r15-8047, which assumed the alignment
    analysis for VMAT_STRIDED_SLP is correct and used aligned accesses
    where it allowed them.  As the PR shows, the analysis is still
    incorrect, so revert back to assuming we got it wrong.
    
            PR tree-optimization/119586
            * tree-vect-stmts.cc (vectorizable_load): Assume we got
            alignment analysis for VMAT_STRIDED_SLP wrong.
            (vectorizable_store): Likewise.
    
            * gcc.dg/vect/pr119586.c: New testcase.
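
To make the alignment issue concrete, here is a small standalone sketch
(illustrative only; it assumes an 8-byte long and a 16-byte vector size, as on
common LP64 targets).  Each inner iteration of the new testcase stores
elements 0, 1 and 4 of a 5-long row, so consecutive group starts are 40 bytes
apart and only every other one is 16-byte aligned:

  /* Illustrative sketch, not part of the commit; assumes 8-byte long.
     Prints the byte offset of each group start in the PR119586 testcase
     and that offset modulo a 16-byte vector size - it alternates
     between 0 and 8.  */
  #include <stdio.h>
  #include <stdint.h>

  int main (void)
  {
    static long e[6][8][5];
    uintptr_t base = (uintptr_t) &e[0][0][0];
    for (int c = 0; c < 8; c++)
      {
        unsigned long off
          = (unsigned long) ((uintptr_t) &e[0][c][0] - base);
        printf ("group start at offset %3lu, offset %% 16 = %lu\n",
                off, off % 16);
      }
    return 0;
  }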

Diff:
---
 gcc/testsuite/gcc.dg/vect/pr119586.c | 21 +++++++++++++++++++++
 gcc/tree-vect-stmts.cc               | 21 +++++++++++++--------
 2 files changed, 34 insertions(+), 8 deletions(-)

diff --git a/gcc/testsuite/gcc.dg/vect/pr119586.c b/gcc/testsuite/gcc.dg/vect/pr119586.c
new file mode 100644
index 000000000000..04a00ef131e5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr119586.c
@@ -0,0 +1,21 @@
+#include "tree-vect.h"
+
+void __attribute__((noipa)) foo (long *) {}
+void __attribute__((noipa))
+d()
+{
+  long e[6][8][5];
+  for (int b = 0; b < 6; b++)
+    for (int c = 0; c < 8; c++)
+      {
+        e[b][c][0] = 1;
+        e[b][c][1] = 1;
+        e[b][c][4] = 1;
+      }
+  foo (&e[0][0][0]);
+}
+int main()
+{
+  check_vect ();
+  d();
+}
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 8bd5ea96667d..3005ae6eaaea 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -8906,10 +8906,17 @@ vectorizable_store (vec_info *vinfo,
                }
            }
          unsigned align;
-         if (alignment_support_scheme == dr_aligned)
-           align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
-         else
-           align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+         /* ???  We'd want to use
+              if (alignment_support_scheme == dr_aligned)
+                align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
+            since doing that is what we assume we can in the above checks.
+            But this interferes with groups with gaps where for example
+            VF == 2 makes the group in the unrolled loop aligned but the
+            fact that we advance with step between the two subgroups
+            makes the access to the second unaligned.  See PR119586.
+            We have to anticipate that here or adjust code generation to
+            avoid the misaligned loads by means of permutations.  */
+         align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
          /* Alignment is at most the access size if we do multiple stores.  */
          if (nstores > 1)
            align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
@@ -10884,10 +10891,8 @@ vectorizable_load (vec_info *vinfo,
                }
            }
          unsigned align;
-         if (alignment_support_scheme == dr_aligned)
-           align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
-         else
-           align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+         /* ???  The above is still wrong, see vectorizable_store.  */
+         align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
          /* Alignment is at most the access size if we do multiple loads.  */
          if (nloads > 1)
            align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
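
The ??? comment added in vectorizable_store describes the failure mode in
terms of the vectorization factor.  A numeric sketch of that situation
(again assuming 8-byte long, a 16-byte vector and a 16-byte aligned base;
illustrative only): with VF == 2 the vectorized loop advances by two group
steps, 2 * 40 = 80 bytes, per iteration, so the start of each iteration
stays 16-byte aligned, yet the second subgroup inside the iteration sits
40 bytes further on and is only 8-byte aligned.

  /* Illustrative only: models the VF == 2 case from the ??? comment.
     The per-iteration advance (80 bytes) keeps the iteration start
     16-byte aligned, while the second subgroup at +40 bytes is not.  */
  #include <stdio.h>

  int main (void)
  {
    const unsigned group_step = 5 * 8;  /* five 8-byte longs per row  */
    const unsigned vf = 2;              /* assumed vectorization factor  */
    for (unsigned i = 0; i < 4; i++)
      {
        unsigned first = i * vf * group_step;  /* 0, 80, 160, 240  */
        unsigned second = first + group_step;  /* 40, 120, 200, 280  */
        printf ("iter %u: first %% 16 = %u, second %% 16 = %u\n",
                i, first % 16, second % 16);
      }
    return 0;
  }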
