Prune unreachable non-SLP paths in vectorizable_store

        * tree-vect-stmts.cc (vectorizable_store): Remove non-SLP
        paths.
---
 gcc/tree-vect-stmts.cc | 591 +++++++++++++----------------------------
 1 file changed, 192 insertions(+), 399 deletions(-)
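
For reviewers, a standalone sketch of the kind of fold this patch performs
(hypothetical names, not GCC code): once the SLP flag is a compile-time
constant, the arm it guards is dead and only the live path is kept.

  /* Standalone illustration (hypothetical names, not GCC code): once a
     flag is known to be constant true, the else-arm it guards is dead
     and can be deleted, leaving only the live path.  */
  #include <assert.h>

  static int
  ncopies_before (int slp /* hard-coded to 1 by an earlier change */, int n)
  {
    if (slp)
      return 1;   /* always taken */
    else
      return n;   /* unreachable; this cleanup removes such branches */
  }

  static int
  ncopies_after (void)
  {
    return 1;     /* only the live path remains */
  }

  int
  main (void)
  {
    assert (ncopies_before (1, 4) == ncopies_after ());
    return 0;
  }
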

diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 23316a49b3d..bf1edabf0d4 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -8357,7 +8357,6 @@ vectorizable_store (vec_info *vinfo,
   stmt_vec_info first_stmt_info;
   bool grouped_store;
   unsigned int group_size, i;
-  bool slp = true;
   unsigned int vec_num;
   bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
   tree aggr_type;
@@ -8403,7 +8402,7 @@ vectorizable_store (vec_info *vinfo,
        return false;
 
       int mask_index = internal_fn_mask_index (ifn);
-      if (mask_index >= 0 && 1)
+      if (mask_index >= 0)
        mask_index = vect_slp_child_index_for_operand
                    (call, mask_index, STMT_VINFO_GATHER_SCATTER_P (stmt_info));
       if (mask_index >= 0
@@ -8415,7 +8414,7 @@ vectorizable_store (vec_info *vinfo,
 
   /* Cannot have hybrid store SLP -- that would mean storing to the
      same location twice.  */
-  gcc_assert (1 == PURE_SLP_STMT (stmt_info));
+  gcc_assert (PURE_SLP_STMT (stmt_info));
 
   tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
   poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
@@ -8431,17 +8430,14 @@ vectorizable_store (vec_info *vinfo,
   /* Multiple types in SLP are handled by creating the appropriate number of
      vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
      case of SLP.  */
-  if (1)
-    ncopies = 1;
-  else
-    ncopies = vect_get_num_copies (loop_vinfo, vectype);
+  ncopies = 1;
 
   gcc_assert (ncopies >= 1);
 
   /* FORNOW.  This restriction should be relaxed.  */
   if (loop
       && nested_in_vect_loop_p (loop, stmt_info)
-      && (ncopies > 1 || (1 && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1)))
+      && (ncopies > 1 || SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1))
     {
       if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -8472,8 +8468,7 @@ vectorizable_store (vec_info *vinfo,
                            &lanes_ifn))
     return false;
 
-  if (1
-      && slp_node->ldst_lanes
+  if (slp_node->ldst_lanes
       && memory_access_type != VMAT_LOAD_STORE_LANES)
     {
       if (dump_enabled_p ())
@@ -8520,8 +8515,7 @@ vectorizable_store (vec_info *vinfo,
 
   dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
   grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-                  && memory_access_type != VMAT_GATHER_SCATTER
-                  && (1 || memory_access_type != VMAT_CONTIGUOUS));
+                  && memory_access_type != VMAT_GATHER_SCATTER);
   if (grouped_store)
     {
       first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
@@ -8546,8 +8540,7 @@ vectorizable_store (vec_info *vinfo,
   if (costing_p) /* transformation not required.  */
     {
       STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
-      if (1)
-       SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type;
+      SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type;
 
       if (loop_vinfo
          && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
@@ -8556,11 +8549,10 @@ vectorizable_store (vec_info *vinfo,
                                              memory_access_type, &gs_info,
                                              mask);
 
-      if (1
-         && (!vect_maybe_update_slp_op_vectype (op_node, vectype)
-             || (mask
-                 && !vect_maybe_update_slp_op_vectype (mask_node,
-                                                       mask_vectype))))
+      if (!vect_maybe_update_slp_op_vectype (op_node, vectype)
+         || (mask
+             && !vect_maybe_update_slp_op_vectype (mask_node,
+                                                   mask_vectype)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -8578,22 +8570,8 @@ vectorizable_store (vec_info *vinfo,
                         "Vectorizing an unaligned access.\n");
 
       STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
-
-      /* As function vect_transform_stmt shows, for interleaving stores
-        the whole chain is vectorized when the last store in the chain
-        is reached, the other stores in the group are skipped.  So we
-        want to only cost the last one here, but it's not trivial to
-        get the last, as it's equivalent to use the first one for
-        costing, use the first one instead.  */
-      if (grouped_store
-         && !1
-         && first_stmt_info != stmt_info)
-       return true;
     }
-  if (1)
-    gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (stmt_info));
-  else
-    gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
+  gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (stmt_info));
 
   /* Transform.  */
 
@@ -8602,7 +8580,7 @@ vectorizable_store (vec_info *vinfo,
   if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) >= 3)
     {
       gcc_assert (memory_access_type == VMAT_CONTIGUOUS);
-      gcc_assert (!1 || SLP_TREE_LANES (slp_node) == 1);
+      gcc_assert (SLP_TREE_LANES (slp_node) == 1);
       if (costing_p)
        {
          unsigned int inside_cost = 0, prologue_cost = 0;
@@ -8625,79 +8603,39 @@ vectorizable_store (vec_info *vinfo,
                                      gsi, vec_stmt, ncopies);
     }
 
-  if (grouped_store || 1)
-    {
-      /* FORNOW */
-      gcc_assert (!grouped_store
-                 || !loop
-                 || !nested_in_vect_loop_p (loop, stmt_info));
+  /* FORNOW */
+  gcc_assert (!grouped_store
+             || !loop
+             || !nested_in_vect_loop_p (loop, stmt_info));
 
-      if (1)
-        {
-          grouped_store = false;
-          /* VEC_NUM is the number of vect stmts to be created for this
-             group.  */
-          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
-         first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
-         gcc_assert (!STMT_VINFO_GROUPED_ACCESS (first_stmt_info)
-                     || (DR_GROUP_FIRST_ELEMENT (first_stmt_info)
-                         == first_stmt_info));
-         first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
-         op = vect_get_store_rhs (first_stmt_info);
-        }
-      else
-        /* VEC_NUM is the number of vect stmts to be created for this
-           group.  */
-       vec_num = group_size;
+  grouped_store = false;
+  /* VEC_NUM is the number of vect stmts to be created for this
+     group.  */
+  vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+  first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
+  gcc_assert (!STMT_VINFO_GROUPED_ACCESS (first_stmt_info)
+             || (DR_GROUP_FIRST_ELEMENT (first_stmt_info) == first_stmt_info));
+  first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
+  op = vect_get_store_rhs (first_stmt_info);
 
-      ref_type = get_group_alias_ptr_type (first_stmt_info);
-    }
-  else
-    ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
+  ref_type = get_group_alias_ptr_type (first_stmt_info);
 
   if (!costing_p && dump_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location, "transform store. ncopies = %d\n",
                     ncopies);
 
-  /* Check if we need to update prologue cost for invariant,
-     and update it accordingly if so.  If it's not for
-     interleaving store, we can just check vls_type; but if
-     it's for interleaving store, need to check the def_type
-     of the stored value since the current vls_type is just
-     for first_stmt_info.  */
-  auto update_prologue_cost = [&](unsigned *prologue_cost, tree store_rhs)
-  {
-    gcc_assert (costing_p);
-    if (1)
-      return;
-    if (grouped_store)
-      {
-       gcc_assert (store_rhs);
-       enum vect_def_type cdt;
-       gcc_assert (vect_is_simple_use (store_rhs, vinfo, &cdt));
-       if (cdt != vect_constant_def && cdt != vect_external_def)
-         return;
-      }
-    else if (vls_type != VLS_STORE_INVARIANT)
-      return;
-    *prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
-                                       slp_node, 0, vect_prologue);
-  };
-
   if (memory_access_type == VMAT_ELEMENTWISE
       || memory_access_type == VMAT_STRIDED_SLP)
     {
       unsigned inside_cost = 0, prologue_cost = 0;
       gimple_stmt_iterator incr_gsi;
       bool insert_after;
-      gimple *incr;
       tree offvar = NULL_TREE;
       tree ivstep;
       tree running_off;
       tree stride_base, stride_step, alias_off;
       tree vec_oprnd = NULL_TREE;
       tree dr_offset;
-      unsigned int g;
       /* Checked by get_load_store_type.  */
       unsigned int const_nunits = nunits.to_constant ();
 
@@ -8735,116 +8673,112 @@ vectorizable_store (vec_info *vinfo,
       unsigned lnel = 1;
       tree ltype = elem_type;
       tree lvectype = vectype;
-      if (1)
-       {
-         HOST_WIDE_INT n = gcd (group_size, const_nunits);
-         if (n == const_nunits)
-           {
-             int mis_align = dr_misalignment (first_dr_info, vectype);
-             /* With VF > 1 we advance the DR by step, if that is constant
-                and only aligned when performed VF times, DR alignment
-                analysis can analyze this as aligned since it assumes
-                contiguous accesses.  But that is not how we code generate
-                here, so adjust for this.  */
-             if (maybe_gt (vf, 1u)
-                 && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
-                                 DR_TARGET_ALIGNMENT (first_dr_info)))
-               mis_align = -1;
-             dr_alignment_support dr_align
-               = vect_supportable_dr_alignment (vinfo, dr_info, vectype,
-                                                mis_align);
-             if (dr_align == dr_aligned
-                 || dr_align == dr_unaligned_supported)
-               {
-                 nstores = 1;
-                 lnel = const_nunits;
-                 ltype = vectype;
-                 lvectype = vectype;
-                 alignment_support_scheme = dr_align;
-                 misalignment = mis_align;
-               }
-           }
-         else if (n > 1)
-           {
-             nstores = const_nunits / n;
-             lnel = n;
-             ltype = build_vector_type (elem_type, n);
+      HOST_WIDE_INT n = gcd (group_size, const_nunits);
+      if (n == const_nunits)
+       {
+         int mis_align = dr_misalignment (first_dr_info, vectype);
+         /* With VF > 1 we advance the DR by step, if that is constant
+            and only aligned when performed VF times, DR alignment
+            analysis can analyze this as aligned since it assumes
+            contiguous accesses.  But that is not how we code generate
+            here, so adjust for this.  */
+         if (maybe_gt (vf, 1u)
+             && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
+                             DR_TARGET_ALIGNMENT (first_dr_info)))
+           mis_align = -1;
+         dr_alignment_support dr_align
+             = vect_supportable_dr_alignment (vinfo, dr_info, vectype,
+                                              mis_align);
+         if (dr_align == dr_aligned
+             || dr_align == dr_unaligned_supported)
+           {
+             nstores = 1;
+             lnel = const_nunits;
+             ltype = vectype;
              lvectype = vectype;
-             int mis_align = dr_misalignment (first_dr_info, ltype);
-             if (maybe_gt (vf, 1u)
-                 && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
-                                 DR_TARGET_ALIGNMENT (first_dr_info)))
-               mis_align = -1;
-             dr_alignment_support dr_align
-               = vect_supportable_dr_alignment (vinfo, dr_info, ltype,
-                                                mis_align);
              alignment_support_scheme = dr_align;
              misalignment = mis_align;
-
-             /* First check if vec_extract optab doesn't support extraction
-                of vector elts directly.  */
-             scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
-             machine_mode vmode;
-             if (!VECTOR_MODE_P (TYPE_MODE (vectype))
-                 || !related_vector_mode (TYPE_MODE (vectype), elmode,
-                                          n).exists (&vmode)
-                 || (convert_optab_handler (vec_extract_optab,
-                                            TYPE_MODE (vectype), vmode)
-                     == CODE_FOR_nothing)
-                 || !(dr_align == dr_aligned
-                      || dr_align == dr_unaligned_supported))
-               {
-                 /* Try to avoid emitting an extract of vector elements
-                    by performing the extracts using an integer type of the
-                    same size, extracting from a vector of those and then
-                    re-interpreting it as the original vector type if
-                    supported.  */
-                 unsigned lsize
-                   = n * GET_MODE_BITSIZE (elmode);
-                 unsigned int lnunits = const_nunits / n;
-                 /* If we can't construct such a vector fall back to
-                    element extracts from the original vector type and
-                    element size stores.  */
-                 if (int_mode_for_size (lsize, 0).exists (&elmode)
-                     && VECTOR_MODE_P (TYPE_MODE (vectype))
-                     && related_vector_mode (TYPE_MODE (vectype), elmode,
-                                             lnunits).exists (&vmode)
-                     && (convert_optab_handler (vec_extract_optab,
-                                                vmode, elmode)
-                         != CODE_FOR_nothing))
-                   {
-                     nstores = lnunits;
-                     lnel = n;
-                     ltype = build_nonstandard_integer_type (lsize, 1);
-                     lvectype = build_vector_type (ltype, nstores);
-                   }
-                 /* Else fall back to vector extraction anyway.
-                    Fewer stores are more important than avoiding spilling
-                    of the vector we extract from.  Compared to the
-                    construction case in vectorizable_load no store-forwarding
-                    issue exists here for reasonable archs.  But only
-                    if the store is supported.  */
-                 else if (!(dr_align == dr_aligned
-                            || dr_align == dr_unaligned_supported))
-                   {
-                     nstores = const_nunits;
-                     lnel = 1;
-                     ltype = elem_type;
-                     lvectype = vectype;
-                   }
-               }
            }
-         unsigned align;
-         if (alignment_support_scheme == dr_aligned)
-           align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
-         else
-           align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
-         /* Alignment is at most the access size if we do multiple stores.  */
-         if (nstores > 1)
-           align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
-         ltype = build_aligned_type (ltype, align * BITS_PER_UNIT);
-         ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
        }
+      else if (n > 1)
+       {
+         nstores = const_nunits / n;
+         lnel = n;
+         ltype = build_vector_type (elem_type, n);
+         lvectype = vectype;
+         int mis_align = dr_misalignment (first_dr_info, ltype);
+         if (maybe_gt (vf, 1u)
+             && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
+                             DR_TARGET_ALIGNMENT (first_dr_info)))
+           mis_align = -1;
+         dr_alignment_support dr_align
+           = vect_supportable_dr_alignment (vinfo, dr_info, ltype,
+                                            mis_align);
+         alignment_support_scheme = dr_align;
+         misalignment = mis_align;
+
+         /* First check if vec_extract optab doesn't support extraction
+            of vector elts directly.  */
+         scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
+         machine_mode vmode;
+         if (!VECTOR_MODE_P (TYPE_MODE (vectype))
+             || !related_vector_mode (TYPE_MODE (vectype), elmode,
+                                      n).exists (&vmode)
+             || (convert_optab_handler (vec_extract_optab,
+                                        TYPE_MODE (vectype), vmode)
+                 == CODE_FOR_nothing)
+             || !(dr_align == dr_aligned
+                  || dr_align == dr_unaligned_supported))
+           {
+             /* Try to avoid emitting an extract of vector elements
+                by performing the extracts using an integer type of the
+                same size, extracting from a vector of those and then
+                re-interpreting it as the original vector type if
+                supported.  */
+             unsigned lsize = n * GET_MODE_BITSIZE (elmode);
+             unsigned int lnunits = const_nunits / n;
+             /* If we can't construct such a vector fall back to
+                element extracts from the original vector type and
+                element size stores.  */
+             if (int_mode_for_size (lsize, 0).exists (&elmode)
+                 && VECTOR_MODE_P (TYPE_MODE (vectype))
+                 && related_vector_mode (TYPE_MODE (vectype), elmode,
+                                         lnunits).exists (&vmode)
+                 && (convert_optab_handler (vec_extract_optab,
+                                            vmode, elmode)
+                     != CODE_FOR_nothing))
+               {
+                 nstores = lnunits;
+                 lnel = n;
+                 ltype = build_nonstandard_integer_type (lsize, 1);
+                 lvectype = build_vector_type (ltype, nstores);
+               }
+             /* Else fall back to vector extraction anyway.
+                Fewer stores are more important than avoiding spilling
+                of the vector we extract from.  Compared to the
+                construction case in vectorizable_load no store-forwarding
+                issue exists here for reasonable archs.  But only
+                if the store is supported.  */
+                else if (!(dr_align == dr_aligned
+                           || dr_align == dr_unaligned_supported))
+                  {
+                    nstores = const_nunits;
+                    lnel = 1;
+                    ltype = elem_type;
+                    lvectype = vectype;
+                  }
+           }
+       }
+      unsigned align;
+      if (alignment_support_scheme == dr_aligned)
+       align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
+      else
+       align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+      /* Alignment is at most the access size if we do multiple stores.  */
+      if (nstores > 1)
+       align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
+      ltype = build_aligned_type (ltype, align * BITS_PER_UNIT);
+      ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
 
       if (!costing_p)
        {
@@ -8858,7 +8792,6 @@ vectorizable_store (vec_info *vinfo,
          ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
          create_iv (stride_base, PLUS_EXPR, ivstep, NULL, loop, &incr_gsi,
                     insert_after, &offvar, NULL);
-         incr = gsi_stmt (incr_gsi);
 
          stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
        }
@@ -8869,104 +8802,68 @@ vectorizable_store (vec_info *vinfo,
       /* For costing some adjacent vector stores, we'd like to cost with
         the total number of them once instead of cost each one by one. */
       unsigned int n_adjacent_stores = 0;
-      for (g = 0; g < group_size; g++)
+      running_off = offvar;
+      if (!costing_p)
+       vect_get_vec_defs (vinfo, next_stmt_info, slp_node, ncopies, op,
+                          &vec_oprnds);
+      unsigned int group_el = 0;
+      unsigned HOST_WIDE_INT elsz
+       = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
+      for (j = 0; j < ncopies; j++)
        {
-         running_off = offvar;
          if (!costing_p)
            {
-             if (g)
+             vec_oprnd = vec_oprnds[j];
+             /* Pun the vector to extract from if necessary.  */
+             if (lvectype != vectype)
                {
-                 tree size = TYPE_SIZE_UNIT (ltype);
-                 tree pos
-                   = fold_build2 (MULT_EXPR, sizetype, size_int (g), size);
-                 tree newoff = copy_ssa_name (running_off, NULL);
-                 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
-                                             running_off, pos);
-                 vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
-                 running_off = newoff;
+                 tree tem = make_ssa_name (lvectype);
+                 tree cvt = build1 (VIEW_CONVERT_EXPR, lvectype, vec_oprnd);
+                 gimple *pun = gimple_build_assign (tem, cvt);
+                 vect_finish_stmt_generation (vinfo, stmt_info, pun, gsi);
+                 vec_oprnd = tem;
                }
            }
-         if (!1)
-           op = vect_get_store_rhs (next_stmt_info);
-         if (!costing_p)
-           vect_get_vec_defs (vinfo, next_stmt_info, slp_node, ncopies, op,
-                              &vec_oprnds);
-         else
-           update_prologue_cost (&prologue_cost, op);
-         unsigned int group_el = 0;
-         unsigned HOST_WIDE_INT
-           elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
-         for (j = 0; j < ncopies; j++)
+         for (i = 0; i < nstores; i++)
            {
-             if (!costing_p)
+             if (costing_p)
                {
-                 vec_oprnd = vec_oprnds[j];
-                 /* Pun the vector to extract from if necessary.  */
-                 if (lvectype != vectype)
-                   {
-                     tree tem = make_ssa_name (lvectype);
-                     tree cvt
-                       = build1 (VIEW_CONVERT_EXPR, lvectype, vec_oprnd);
-                     gimple *pun = gimple_build_assign (tem, cvt);
-                     vect_finish_stmt_generation (vinfo, stmt_info, pun, gsi);
-                     vec_oprnd = tem;
-                   }
+                 n_adjacent_stores++;
+                 continue;
                }
-             for (i = 0; i < nstores; i++)
+             tree newref, newoff;
+             gimple *incr, *assign;
+             tree size = TYPE_SIZE (ltype);
+             /* Extract the i'th component.  */
+             tree pos = fold_build2 (MULT_EXPR, bitsizetype,
+                                     bitsize_int (i), size);
+             tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
+                                      size, pos);
+
+             elem = force_gimple_operand_gsi (gsi, elem, true, NULL_TREE, true,
+                                              GSI_SAME_STMT);
+
+             tree this_off = build_int_cst (TREE_TYPE (alias_off),
+                                            group_el * elsz);
+             newref = build2 (MEM_REF, ltype, running_off, this_off);
+             vect_copy_ref_info (newref, DR_REF (first_dr_info->dr));
+
+             /* And store it to *running_off.  */
+             assign = gimple_build_assign (newref, elem);
+             vect_finish_stmt_generation (vinfo, stmt_info, assign, gsi);
+
+             group_el += lnel;
+             if (group_el == group_size)
                {
-                 if (costing_p)
-                   {
-                     n_adjacent_stores++;
-                     continue;
-                   }
-                 tree newref, newoff;
-                 gimple *incr, *assign;
-                 tree size = TYPE_SIZE (ltype);
-                 /* Extract the i'th component.  */
-                 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
-                                         bitsize_int (i), size);
-                 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
-                                          size, pos);
-
-                 elem = force_gimple_operand_gsi (gsi, elem, true,
-                                                  NULL_TREE, true,
-                                                  GSI_SAME_STMT);
-
-                 tree this_off = build_int_cst (TREE_TYPE (alias_off),
-                                                group_el * elsz);
-                 newref = build2 (MEM_REF, ltype,
-                                  running_off, this_off);
-                 vect_copy_ref_info (newref, DR_REF (first_dr_info->dr));
-
-                 /* And store it to *running_off.  */
-                 assign = gimple_build_assign (newref, elem);
-                 vect_finish_stmt_generation (vinfo, stmt_info, assign, gsi);
-
-                 group_el += lnel;
-                 if (! 1
-                     || group_el == group_size)
-                   {
-                     newoff = copy_ssa_name (running_off, NULL);
-                     incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
-                                                 running_off, stride_step);
-                     vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
+                 newoff = copy_ssa_name (running_off, NULL);
+                 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
+                                             running_off, stride_step);
+                 vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
 
-                     running_off = newoff;
-                     group_el = 0;
-                   }
-                 if (g == group_size - 1
-                     && !1)
-                   {
-                     if (j == 0 && i == 0)
-                       *vec_stmt = assign;
-                     STMT_VINFO_VEC_STMTS (stmt_info).safe_push (assign);
-                   }
+                 running_off = newoff;
+                 group_el = 0;
                }
            }
-         next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
-         vec_oprnds.truncate(0);
-         if (1)
-           break;
        }
 
       if (costing_p)
@@ -9107,7 +9004,7 @@ vectorizable_store (vec_info *vinfo,
 
   if (memory_access_type == VMAT_LOAD_STORE_LANES)
     {
-      if (costing_p && 1)
+      if (costing_p)
        /* Update all incoming store operand nodes, the general handling
           above only handles the mask and the first store operand node.  */
        for (slp_tree child : SLP_TREE_CHILDREN (slp_node))
@@ -9123,49 +9020,18 @@ vectorizable_store (vec_info *vinfo,
       /* For costing some adjacent vector stores, we'd like to cost with
         the total number of them once instead of cost each one by one. */
       unsigned int n_adjacent_stores = 0;
-      if (1)
-       ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) / group_size;
+      int ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) / group_size;
       for (j = 0; j < ncopies; j++)
        {
-         gimple *new_stmt;
          if (j == 0)
            {
-             /* For interleaved stores we collect vectorized defs for all
-                the stores in the group in DR_CHAIN. DR_CHAIN is then used
-                as an input to vect_permute_store_chain().  */
-             stmt_vec_info next_stmt_info = first_stmt_info;
-             for (i = 0; i < group_size; i++)
-               {
-                 /* Since gaps are not supported for interleaved stores,
-                    DR_GROUP_SIZE is the exact number of stmts in the
-                    chain. Therefore, NEXT_STMT_INFO can't be NULL_TREE.  */
-                 op = vect_get_store_rhs (next_stmt_info);
-                 if (costing_p)
-                   update_prologue_cost (&prologue_cost, op);
-                 else if (!1)
-                   {
-                     vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
-                                                    ncopies, op,
-                                                    gvec_oprnds[i]);
-                     vec_oprnd = (*gvec_oprnds[i])[0];
-                     dr_chain.quick_push (vec_oprnd);
-                   }
-                 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
-               }
-
              if (!costing_p)
                {
                  if (mask)
                    {
-                     if (1)
-                       vect_get_slp_defs (mask_node, &vec_masks);
-                     else
-                     vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
-                                                      mask, &vec_masks,
-                                                      mask_vectype);
+                     vect_get_slp_defs (mask_node, &vec_masks);
                      vec_mask = vec_masks[0];
                    }
-
                  dataref_ptr
                    = vect_create_data_ref_ptr (vinfo, first_stmt_info,
                                                aggr_type, NULL, offset, &dummy,
@@ -9175,19 +9041,6 @@ vectorizable_store (vec_info *vinfo,
          else if (!costing_p)
            {
              gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
-             /* DR_CHAIN is then used as an input to
-                vect_permute_store_chain().  */
-             if (!1)
-               {
-                 /* We should have caught mismatched types earlier.  */
-                 gcc_assert (
-                   useless_type_conversion_p (vectype, TREE_TYPE (vec_oprnd)));
-                 for (i = 0; i < group_size; i++)
-                   {
-                     vec_oprnd = (*gvec_oprnds[i])[j];
-                     dr_chain[i] = vec_oprnd;
-                   }
-               }
              if (mask)
                vec_mask = vec_masks[j];
              dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
@@ -9211,17 +9064,12 @@ vectorizable_store (vec_info *vinfo,
          /* Store the individual vectors into the array.  */
          for (i = 0; i < group_size; i++)
            {
-             if (1)
-               {
-                 slp_tree child;
-                 if (i == 0 || !mask_node)
-                   child = SLP_TREE_CHILDREN (slp_node)[i];
-                 else
-                   child = SLP_TREE_CHILDREN (slp_node)[i + 1];
-                 vec_oprnd = SLP_TREE_VEC_DEFS (child)[j];
-               }
+             slp_tree child;
+             if (i == 0 || !mask_node)
+               child = SLP_TREE_CHILDREN (slp_node)[i];
              else
-               vec_oprnd = dr_chain[i];
+               child = SLP_TREE_CHILDREN (slp_node)[i + 1];
+             vec_oprnd = SLP_TREE_VEC_DEFS (child)[j];
              write_vector_array (vinfo, stmt_info, gsi, vec_oprnd, vec_array,
                                  i);
            }
@@ -9287,14 +9135,9 @@ vectorizable_store (vec_info *vinfo,
            }
          gimple_call_set_nothrow (call, true);
          vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
-         new_stmt = call;
 
          /* Record that VEC_ARRAY is now dead.  */
          vect_clobber_variable (vinfo, stmt_info, gsi, vec_array);
-         if (j == 0 && !1)
-           *vec_stmt = new_stmt;
-         if (!1)
-           STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
        }
 
       if (costing_p)
@@ -9332,21 +9175,9 @@ vectorizable_store (vec_info *vinfo,
                  /* Since the store is not grouped, DR_GROUP_SIZE is 1, and
                     DR_CHAIN is of size 1.  */
                  gcc_assert (group_size == 1);
-                 if (1)
-                   vect_get_slp_defs (op_node, gvec_oprnds[0]);
-                 else
-                   vect_get_vec_defs_for_operand (vinfo, first_stmt_info,
-                                                  num_stmts, op, gvec_oprnds[0]);
+                 vect_get_slp_defs (op_node, gvec_oprnds[0]);
                  if (mask)
-                   {
-                     if (1)
-                       vect_get_slp_defs (mask_node, &vec_masks);
-                     else
-                       vect_get_vec_defs_for_operand (vinfo, stmt_info,
-                                                      num_stmts,
-                                                      mask, &vec_masks,
-                                                      mask_vectype);
-                   }
+                   vect_get_slp_defs (mask_node, &vec_masks);
 
                  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
                    vect_get_gather_scatter_ops (loop_vinfo, loop, stmt_info,
@@ -9638,17 +9469,10 @@ vectorizable_store (vec_info *vinfo,
                  vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
                }
 
-             if (1)
-               slp_node->push_vec_def (new_stmt);
+             slp_node->push_vec_def (new_stmt);
            }
-
-         if (!1 && !costing_p)
-           STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
        }
 
-      if (!1 && !costing_p)
-       *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
-
       if (costing_p && dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_store_cost: inside_cost = %d, "
@@ -9674,7 +9498,7 @@ vectorizable_store (vec_info *vinfo,
       gimple *new_stmt;
       if (j == 0)
        {
-         if (1 && !costing_p)
+         if (!costing_p)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op,
@@ -9700,9 +9524,7 @@ vectorizable_store (vec_info *vinfo,
                     that there is no interleaving, DR_GROUP_SIZE is 1,
                     and only one iteration of the loop will be executed.  */
                  op = vect_get_store_rhs (next_stmt_info);
-                 if (costing_p)
-                   update_prologue_cost (&prologue_cost, op);
-                 else
+                 if (!costing_p)
                    {
                      vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
                                                     ncopies, op,
@@ -9789,18 +9611,10 @@ vectorizable_store (vec_info *vinfo,
                                      gsi, &result_chain);
        }
 
-      stmt_vec_info next_stmt_info = first_stmt_info;
       for (i = 0; i < vec_num; i++)
        {
          if (!costing_p)
-           {
-             if (1)
-               vec_oprnd = vec_oprnds[i];
-             else if (grouped_store)
-               /* For grouped stores vectorized defs are interleaved in
-                  vect_permute_store_chain().  */
-               vec_oprnd = result_chain[i];
-           }
+           vec_oprnd = vec_oprnds[i];
 
          if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
            {
@@ -9829,14 +9643,6 @@ vectorizable_store (vec_info *vinfo,
          if (costing_p)
            {
              n_adjacent_stores++;
-
-             if (!1)
-               {
-                 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
-                 if (!next_stmt_info)
-                   break;
-               }
-
              continue;
            }
 
@@ -9847,7 +9653,7 @@ vectorizable_store (vec_info *vinfo,
            final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks,
                                             vec_num * ncopies, vectype,
                                             vec_num * j + i);
-         if (1 && vec_mask)
+         if (vec_mask)
            vec_mask = vec_masks[i];
          if (vec_mask)
            final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, final_mask,
@@ -9980,19 +9786,6 @@ vectorizable_store (vec_info *vinfo,
              new_stmt = gimple_build_assign (data_ref, vec_oprnd);
              vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
            }
-
-         if (1)
-           continue;
-
-         next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
-         if (!next_stmt_info)
-           break;
-       }
-      if (!1 && !costing_p)
-       {
-         if (j == 0)
-           *vec_stmt = new_stmt;
-         STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
        }
     }
 
-- 
2.43.0

