https://gcc.gnu.org/g:b6822bf3e3f3ff37d64be700f139c8fce3a9bf44

commit r15-321-gb6822bf3e3f3ff37d64be700f139c8fce3a9bf44
Author: Richard Biener <rguent...@suse.de>
Date:   Tue Mar 5 16:07:41 2024 +0100

    Fix non-grouped SLP load/store accounting in alignment peeling
    
    When we have a non-grouped access we bogusly multiply by zero.
    This shows up most with single-lane SLP but also happens with
    the multi-lane splat case.
    
            * tree-vect-data-refs.cc (vect_enhance_data_refs_alignment):
            Properly guard DR_GROUP_SIZE access with STMT_VINFO_GROUPED_ACCESS.

Diff:
---
 gcc/tree-vect-data-refs.cc | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/gcc/tree-vect-data-refs.cc b/gcc/tree-vect-data-refs.cc
index c531079d3bbf..ae237407672c 100644
--- a/gcc/tree-vect-data-refs.cc
+++ b/gcc/tree-vect-data-refs.cc
@@ -2290,8 +2290,11 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
               if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
                {
                  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
-                 nscalars = (STMT_SLP_TYPE (stmt_info)
-                             ? vf * DR_GROUP_SIZE (stmt_info) : vf);
+                 unsigned group_size = 1;
+                 if (STMT_SLP_TYPE (stmt_info)
+                     && STMT_VINFO_GROUPED_ACCESS (stmt_info))
+                   group_size = DR_GROUP_SIZE (stmt_info);
+                 nscalars = vf * group_size;
                }
 
              /* Save info about DR in the hash table.  Also include peeling

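For context, below is a minimal sketch of the kind of loop the fix concerns.  The commit touches only tree-vect-data-refs.cc (no testcase is included), so the function name, the suggested flags, and whether this exact loop reaches the affected single-lane SLP alignment-peeling path on a given target are assumptions made here for illustration only:

/* Each iteration loads and stores a single element, so the data references
   are non-grouped: there is no DR_GROUP and DR_GROUP_SIZE must not be read.
   Compiling with e.g. -O3 -fvect-cost-model=unlimited takes the
   unlimited_cost_model branch shown in the hunk above.  */
void
foo (float *__restrict a, const float *__restrict b, int n)
{
  for (int i = 0; i < n; i++)
    a[i] = b[i] * 2.0f;
}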