This patch enables MVE vbic instructions for auto-vectorization. The MVE vbicq insns in mve.md are modified to use an 'and not' RTL expression instead of an unspec, so that the vectorizer can match them.
2020-12-11 Christophe Lyon <christophe.l...@linaro.org> gcc/ * config/arm/iterators.md (supf): Remove VBICQ_S and VBICQ_U. (VBICQ): Remove. * config/arm/mve.md (mve_vbicq_u<mode>): New entry for vbic instruction using expression and not. (mve_vbicq_s<mode>): New expander. (mve_vbicq_f<mode>): Replace use of unspec by 'and not'. * config/arm/unspecs.md (VBICQ_S, VBICQ_U, VBICQ_F): Remove. gcc/testsuite/ * gcc.target/arm/simd/mve-vbic.c: Add tests for vbic. --- gcc/config/arm/iterators.md | 3 +- gcc/config/arm/mve.md | 23 ++++++---- gcc/config/arm/unspecs.md | 3 -- gcc/testsuite/gcc.target/arm/simd/mve-vbic.c | 65 ++++++++++++++++++++++++++++ 4 files changed, 81 insertions(+), 13 deletions(-) create mode 100644 gcc/testsuite/gcc.target/arm/simd/mve-vbic.c diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md index 0195275..26351e0 100644 --- a/gcc/config/arm/iterators.md +++ b/gcc/config/arm/iterators.md @@ -1232,7 +1232,7 @@ (define_int_attr supf [(VCVTQ_TO_F_S "s") (VCVTQ_TO_F_U "u") (VREV16Q_S "s") (VADDLVQ_P_U "u") (VCMPNEQ_U "u") (VCMPNEQ_S "s") (VABDQ_M_S "s") (VABDQ_M_U "u") (VABDQ_S "s") (VABDQ_U "u") (VADDQ_N_S "s") (VADDQ_N_U "u") - (VADDVQ_P_S "s") (VADDVQ_P_U "u") (VBICQ_S "s") (VBICQ_U "u") + (VADDVQ_P_S "s") (VADDVQ_P_U "u") (VBRSRQ_N_S "s") (VBRSRQ_N_U "u") (VCADDQ_ROT270_S "s") (VCADDQ_ROT270_U "u") (VCADDQ_ROT90_S "s") (VCMPEQQ_S "s") (VCMPEQQ_U "u") (VCADDQ_ROT90_U "u") @@ -1500,7 +1500,6 @@ (define_int_iterator VABDQ [VABDQ_S VABDQ_U]) (define_int_iterator VADDQ_N [VADDQ_N_S VADDQ_N_U]) (define_int_iterator VADDVAQ [VADDVAQ_S VADDVAQ_U]) (define_int_iterator VADDVQ_P [VADDVQ_P_U VADDVQ_P_S]) -(define_int_iterator VBICQ [VBICQ_S VBICQ_U]) (define_int_iterator VBRSRQ_N [VBRSRQ_N_U VBRSRQ_N_S]) (define_int_iterator VCADDQ_ROT270 [VCADDQ_ROT270_S VCADDQ_ROT270_U]) (define_int_iterator VCADDQ_ROT90 [VCADDQ_ROT90_U VCADDQ_ROT90_S]) diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md index 10512ad..0505537 100644 --- 
a/gcc/config/arm/mve.md +++ b/gcc/config/arm/mve.md @@ -922,18 +922,26 @@ (define_expand "mve_vandq_s<mode>" ;; ;; [vbicq_s, vbicq_u]) ;; -(define_insn "mve_vbicq_<supf><mode>" +(define_insn "mve_vbicq_u<mode>" [ (set (match_operand:MVE_2 0 "s_register_operand" "=w") - (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w") - (match_operand:MVE_2 2 "s_register_operand" "w")] - VBICQ)) + (and:MVE_2 (not:MVE_2 (match_operand:MVE_2 2 "s_register_operand" "w")) + (match_operand:MVE_2 1 "s_register_operand" "w"))) ] "TARGET_HAVE_MVE" - "vbic %q0, %q1, %q2" + "vbic\t%q0, %q1, %q2" [(set_attr "type" "mve_move") ]) +(define_expand "mve_vbicq_s<mode>" + [ + (set (match_operand:MVE_2 0 "s_register_operand") + (and:MVE_2 (not:MVE_2 (match_operand:MVE_2 2 "s_register_operand")) + (match_operand:MVE_2 1 "s_register_operand"))) + ] + "TARGET_HAVE_MVE" +) + ;; ;; [vbrsrq_n_u, vbrsrq_n_s]) ;; @@ -2066,9 +2074,8 @@ (define_insn "mve_vandq_f<mode>" (define_insn "mve_vbicq_f<mode>" [ (set (match_operand:MVE_0 0 "s_register_operand" "=w") - (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w") - (match_operand:MVE_0 2 "s_register_operand" "w")] - VBICQ_F)) + (and:MVE_0 (not:MVE_0 (match_operand:MVE_0 2 "s_register_operand" "w")) + (match_operand:MVE_0 1 "s_register_operand" "w"))) ] "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT" "vbic %q0, %q1, %q2" diff --git a/gcc/config/arm/unspecs.md b/gcc/config/arm/unspecs.md index fe240e8..8a4389a 100644 --- a/gcc/config/arm/unspecs.md +++ b/gcc/config/arm/unspecs.md @@ -601,7 +601,6 @@ (define_c_enum "unspec" [ VADDQ_N_S VADDVAQ_S VADDVQ_P_S - VBICQ_S VBRSRQ_N_S VCADDQ_ROT270_S VCADDQ_ROT90_S @@ -645,7 +644,6 @@ (define_c_enum "unspec" [ VADDQ_N_U VADDVAQ_U VADDVQ_P_U - VBICQ_U VBRSRQ_N_U VCADDQ_ROT270_U VCADDQ_ROT90_U @@ -715,7 +713,6 @@ (define_c_enum "unspec" [ VABDQ_M_U VABDQ_F VADDQ_N_F - VBICQ_F VCADDQ_ROT270_F VCADDQ_ROT90_F VCMPEQQ_F diff --git a/gcc/testsuite/gcc.target/arm/simd/mve-vbic.c
b/gcc/testsuite/gcc.target/arm/simd/mve-vbic.c new file mode 100644 index 0000000..c9a64c6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/simd/mve-vbic.c @@ -0,0 +1,65 @@ +/* { dg-do assemble } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O3" } */ + +#include <stdint.h> + +#define FUNC(SIGN, TYPE, BITS, NB, OP, NAME) \ + void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a, TYPE##BITS##_t *b) { \ + int i; \ + for (i=0; i<NB; i++) { \ + dest[i] = a[i] OP b[i]; \ + } \ +} + +#define FUNC_IMM(SIGN, TYPE, BITS, NB, OP, NAME) \ + void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a) { \ + int i; \ + for (i=0; i<NB; i++) { \ + dest[i] = a[i] OP 1; \ + } \ +} + +/* 64-bit vectors. */ +FUNC(s, int, 32, 2, & ~ , vbic) +FUNC(u, uint, 32, 2, & ~ , vbic) +FUNC(s, int, 16, 4, & ~ , vbic) +FUNC(u, uint, 16, 4, & ~ , vbic) +FUNC(s, int, 8, 8, & ~ , vbic) +FUNC(u, uint, 8, 8, & ~ , vbic) + +/* 128-bit vectors. */ +FUNC(s, int, 32, 4, & ~ , vbic) +FUNC(u, uint, 32, 4, & ~ , vbic) +FUNC(s, int, 16, 8, & ~ , vbic) +FUNC(u, uint, 16, 8, & ~ , vbic) +FUNC(s, int, 8, 16, & ~ , vbic) +FUNC(u, uint, 8, 16, & ~ , vbic) + +/* 64-bit vectors. */ +FUNC_IMM(s, int, 32, 2, & ~, vbicimm) +FUNC_IMM(u, uint, 32, 2, & ~, vbicimm) +FUNC_IMM(s, int, 16, 4, & ~, vbicimm) +FUNC_IMM(u, uint, 16, 4, & ~, vbicimm) +FUNC_IMM(s, int, 8, 8, & ~, vbicimm) +FUNC_IMM(u, uint, 8, 8, & ~, vbicimm) + +/* 128-bit vectors. */ +FUNC_IMM(s, int, 32, 4, & ~, vbicimm) +FUNC_IMM(u, uint, 32, 4, & ~, vbicimm) +FUNC_IMM(s, int, 16, 8, & ~, vbicimm) +FUNC_IMM(u, uint, 16, 8, & ~, vbicimm) +FUNC_IMM(s, int, 8, 16, & ~, vbicimm) +FUNC_IMM(u, uint, 8, 16, & ~, vbicimm) + +/* MVE has only 128-bit vectors, so we can vectorize only half of the + functions above. 
*/ +/* We emit vand.i[16|32] qX, #XX for the first four versions of the + 128-bit vector vbicimm tests. */ +/* For some reason, we do not generate the immediate version for + int8x16 and uint8x16, we still use vldr to load the vector of + immediates. */ +/* { dg-final { scan-assembler-times {vbic\tq[0-9]+, q[0-9]+, q[0-9]+} 6 } } */ +/* { dg-final { scan-assembler-times {vand.i[0-9]+\tq[0-9]+} 4 } } */ +/* { dg-final { scan-assembler-times {vand\tq[0-9]+, q[0-9]+, q[0-9]+} 2 } } */ -- 2.7.4