This patch leverages the existing MASK_GATHER_LOAD support to handle SLP of
MASK_LEN_GATHER_LOAD with a conditional mask (see the new test below).
SLP of the unconditional form, MASK_LEN_GATHER_LOAD (base, offset, scale, zero, -1),
is not included in this patch since it seems we cannot support it in the
middle-end (due to PR44306).  Instead, I plan to handle GATHER_LOAD explicitly
in the RISC-V backend to work around this issue.
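For reference, a minimal sketch of the unconditional case that is left out
(it mirrors the new test below with the cond[] checks removed; the function
name is illustrative only):

void
f_unconditional (int *restrict y, int *restrict x,
		 int *restrict indices, int n)
{
  /* With no condition, vectorizing this for variable-length vectors would
     need the unconditional MASK_LEN_GATHER_LOAD (base, offset, scale, zero,
     -1) form, which the middle-end cannot SLP yet.  */
  for (int i = 0; i < n; ++i)
    {
      y[i * 2] = x[indices[i * 2]] + 1;
      y[i * 2 + 1] = x[indices[i * 2 + 1]] + 2;
    }
}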
This patch also adds a conditional gather load test, since the testsuite had
no such test before.

OK for trunk?
gcc/ChangeLog:

	* tree-vect-slp.cc (vect_get_operand_map): Add MASK_LEN_GATHER_LOAD.
	(vect_build_slp_tree_1): Ditto.
	(vect_build_slp_tree_2): Ditto.

gcc/testsuite/ChangeLog:

	* gcc.dg/vect/vect-gather-6.c: New test.
---
gcc/testsuite/gcc.dg/vect/vect-gather-6.c | 15 +++++++++++++++
gcc/tree-vect-slp.cc | 8 ++++++--
2 files changed, 21 insertions(+), 2 deletions(-)
create mode 100644 gcc/testsuite/gcc.dg/vect/vect-gather-6.c
diff --git a/gcc/testsuite/gcc.dg/vect/vect-gather-6.c b/gcc/testsuite/gcc.dg/vect/vect-gather-6.c
new file mode 100644
index 00000000000..ff55f321854
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/vect-gather-6.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+
+void
+f (int *restrict y, int *restrict x, int *restrict indices, int *restrict cond, int n)
+{
+  for (int i = 0; i < n; ++i)
+    {
+      if (cond[i * 2])
+	y[i * 2] = x[indices[i * 2]] + 1;
+      if (cond[i * 2 + 1])
+	y[i * 2 + 1] = x[indices[i * 2 + 1]] + 2;
+    }
+}
+
+/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" vect { target vect_gather_load_ifn } } } */
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index 5eb310eceaf..0c197b50054 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -564,6 +564,7 @@ vect_get_operand_map (const gimple *stmt, bool gather_scatter_p = false,
 	  return arg1_map;
 
 	case IFN_MASK_GATHER_LOAD:
+	case IFN_MASK_LEN_GATHER_LOAD:
 	  return arg1_arg4_map;
 
 	case IFN_MASK_STORE:
@@ -1158,7 +1159,8 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 
 	  if (cfn == CFN_MASK_LOAD
 	      || cfn == CFN_GATHER_LOAD
-	      || cfn == CFN_MASK_GATHER_LOAD)
+	      || cfn == CFN_MASK_GATHER_LOAD
+	      || cfn == CFN_MASK_LEN_GATHER_LOAD)
 	    ldst_p = true;
 	  else if (cfn == CFN_MASK_STORE)
 	    {
@@ -1425,6 +1427,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 	  if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info))
 	      && rhs_code != CFN_GATHER_LOAD
 	      && rhs_code != CFN_MASK_GATHER_LOAD
+	      && rhs_code != CFN_MASK_LEN_GATHER_LOAD
 	      && !STMT_VINFO_GATHER_SCATTER_P (stmt_info)
 	      /* Not grouped loads are handled as externals for BB
 		 vectorization.  For loop vectorization we can handle
@@ -1927,7 +1930,8 @@ vect_build_slp_tree_2 (vec_info *vinfo, slp_tree node,
 	  if (gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt))
 	    gcc_assert (gimple_call_internal_p (stmt, IFN_MASK_LOAD)
 			|| gimple_call_internal_p (stmt, IFN_GATHER_LOAD)
-			|| gimple_call_internal_p (stmt, IFN_MASK_GATHER_LOAD));
+			|| gimple_call_internal_p (stmt, IFN_MASK_GATHER_LOAD)
+			|| gimple_call_internal_p (stmt, IFN_MASK_LEN_GATHER_LOAD));
 	  else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
 	    gcc_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
 	  else
--
2.36.3