When doing strided SLP vectorization we use the wrong alignment for
the possibly piecewise accesses of the vector elements for loads and
stores.  While we carefully use element-aligned loads and stores,
that isn't enough when the original scalar accesses are packed.  The
following instead honors larger alignment when present but correctly
falls back to the alignment of the original scalar accesses.  Since
with piecewise accesses only the first piece can be aligned beyond
the piece size, the alignment is additionally capped at the access
size when multiple loads or stores are emitted.
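
For illustration (this snippet is not part of the patch), the problem
scenario boils down to a packed struct whose int member only has byte
alignment, so the vectorizer may not assume the natural four-byte
element alignment for the piecewise accesses:

  #include <stdio.h>
  #include <stdlib.h>

  /* Same layout as in the testcase below: packing drops the
     alignment of the struct and of its int member to 1 byte.  */
  struct s { int x; } __attribute__((packed));

  int main (void)
  {
    /* Prints 1, not 4.  */
    printf ("alignment of struct s: %zu\n", __alignof__ (struct s));

    /* With alignment 1 the struct may legitimately live at an odd
       address, so accesses to p->x have to be emitted as unaligned
       (byte-aligned) loads and stores.  */
    char *buf = malloc (sizeof (struct s) + 1);
    struct s *p = (struct s *) (buf + 1);
    p->x = 42;
    printf ("p->x: %d\n", p->x);
    free (buf);
    return 0;
  }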

Bootstrapped on x86_64-unknown-linux-gnu, testing in progress.

Richard.

        PR tree-optimization/119155
        * tree-vect-stmts.cc (vectorizable_store): Do not always
        use the vector element alignment for VMAT_STRIDED_SLP but
        a more correct alignment, larger when known and smaller
        when the scalar accesses require it.
        (vectorizable_load): Likewise.

        * gcc.dg/vect/pr119155.c: New testcase.
---
 gcc/testsuite/gcc.dg/vect/pr119155.c | 26 ++++++++++++++++++++++++++
 gcc/tree-vect-stmts.cc               | 21 ++++++++++++++++++---
 2 files changed, 44 insertions(+), 3 deletions(-)
 create mode 100644 gcc/testsuite/gcc.dg/vect/pr119155.c

diff --git a/gcc/testsuite/gcc.dg/vect/pr119155.c b/gcc/testsuite/gcc.dg/vect/pr119155.c
new file mode 100644
index 00000000000..b860cf24b0f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr119155.c
@@ -0,0 +1,26 @@
+#include <stdlib.h>
+#include "tree-vect.h"
+
+struct s { int x; } __attribute__((packed));
+
+void __attribute__((noipa))
+f (char *xc, char *yc, int z)
+{
+  for (int i = 0; i < 100; ++i)
+    {
+      struct s *x = (struct s *) xc;
+      struct s *y = (struct s *) yc;
+      x->x += y->x;
+      xc += z;
+      yc += z;
+    }
+}
+
+int main ()
+{
+  check_vect ();
+  char *x = malloc (100 * sizeof (struct s) + 1);
+  char *y = malloc (100 * sizeof (struct s) + 1);
+  f (x + 1, y + 1, sizeof (struct s));
+  return 0;
+}
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 6bbb16beff2..7d0a7fc4033 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -8782,7 +8782,15 @@ vectorizable_store (vec_info *vinfo,
                    }
                }
            }
-         ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
+         unsigned align;
+         if (alignment_support_scheme == dr_aligned)
+           align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
+         else
+           align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+         /* Alignment is at most the access size if we do multiple stores.  */
+         if (nstores > 1)
+           align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
+         ltype = build_aligned_type (ltype, align * BITS_PER_UNIT);
          ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
        }
 
@@ -10750,8 +10758,15 @@ vectorizable_load (vec_info *vinfo,
                    }
                }
            }
-         /* Else fall back to the default element-wise access.  */
-         ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
+         unsigned align;
+         if (alignment_support_scheme == dr_aligned)
+           align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
+         else
+           align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+         /* Alignment is at most the access size if we do multiple loads.  */
+         if (nloads > 1)
+           align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
+         ltype = build_aligned_type (ltype, align * BITS_PER_UNIT);
        }
 
       if (slp)
-- 
2.43.0
