aarch64_evpc_sel (new in GCC 10) got the true and false vectors the wrong way round, leading to execution failures with fixed-length 128-bit SVE.
Now that the ACLE types are in trunk, it's much easier to match the exact asm sequence for a permute. Tested on aarch64-linux-gnu and applied as r280121. Richard 2020-01-10 Richard Sandiford <richard.sandif...@arm.com> gcc/ * config/aarch64/aarch64.c (aarch64_evpc_sel): Fix gen_vcond_mask invocation. gcc/testsuite/ * gcc.target/aarch64/sve/sel_1.c: Use SVE types for the arguments and return values. Use check-function-bodies instead of scan-assembler. * gcc.target/aarch64/sve/sel_2.c: Likewise * gcc.target/aarch64/sve/sel_3.c: Likewise. Index: gcc/config/aarch64/aarch64.c =================================================================== --- gcc/config/aarch64/aarch64.c 2020-01-09 16:26:39.658925053 +0000 +++ gcc/config/aarch64/aarch64.c 2020-01-10 16:30:07.989668567 +0000 @@ -19449,6 +19449,7 @@ aarch64_evpc_sel (struct expand_vec_perm machine_mode pred_mode = aarch64_sve_pred_mode (vmode); + /* Build a predicate that is true when op0 elements should be used. */ rtx_vector_builder builder (pred_mode, n_patterns, 2); for (int i = 0; i < n_patterns * 2; i++) { @@ -19459,7 +19460,8 @@ aarch64_evpc_sel (struct expand_vec_perm rtx const_vec = builder.build (); rtx pred = force_reg (pred_mode, const_vec); - emit_insn (gen_vcond_mask (vmode, vmode, d->target, d->op1, d->op0, pred)); + /* TARGET = PRED ? OP0 : OP1. 
*/ + emit_insn (gen_vcond_mask (vmode, vmode, d->target, d->op0, d->op1, pred)); return true; } Index: gcc/testsuite/gcc.target/aarch64/sve/sel_1.c =================================================================== --- gcc/testsuite/gcc.target/aarch64/sve/sel_1.c 2019-08-25 19:10:33.442172145 +0100 +++ gcc/testsuite/gcc.target/aarch64/sve/sel_1.c 2020-01-10 16:30:07.989668567 +0000 @@ -1,5 +1,6 @@ /* { dg-do assemble { target aarch64_asm_sve_ok } } */ /* { dg-options "-O2 -msve-vector-bits=256 --save-temps" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include <stdint.h> @@ -13,15 +14,14 @@ #define MASK_32 { 0, 33, 2, 35, 4, 37, #define INDEX_32 vnx16qi -#define PERMUTE(type, nunits) \ -type permute_##type (type x, type y) \ -{ \ - return __builtin_shuffle (x, y, (INDEX_##nunits) MASK_##nunits); \ +/* +** permute: +** ptrue (p[0-7])\.h, vl16 +** sel z0\.b, \1, z0\.b, z1\.b +** ret +*/ +__SVInt8_t +permute (__SVInt8_t x, __SVInt8_t y) +{ + return __builtin_shuffle ((vnx16qi) x, (vnx16qi) y, (vnx16qi) MASK_32); } - -PERMUTE(vnx16qi, 32) - -/* { dg-final { scan-assembler-not {\ttbl\t} } } */ - -/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.b, p[0-9]+, z[0-9]+\.b, z[0-9]+\.b\n} 1 } } */ -/* { dg-final { scan-assembler-times {\tptrue\tp[0-9]+\.h, vl16\n} 1 } } */ Index: gcc/testsuite/gcc.target/aarch64/sve/sel_2.c =================================================================== --- gcc/testsuite/gcc.target/aarch64/sve/sel_2.c 2019-08-25 19:10:33.442172145 +0100 +++ gcc/testsuite/gcc.target/aarch64/sve/sel_2.c 2020-01-10 16:30:07.989668567 +0000 @@ -1,14 +1,13 @@ /* { dg-do assemble { target aarch64_asm_sve_ok } } */ /* { dg-options "-O2 -msve-vector-bits=256 --save-temps" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include <stdint.h> typedef int8_t vnx16qi __attribute__((vector_size (32))); typedef int16_t vnx8hi __attribute__((vector_size (32))); -typedef int32_t vnx4si __attribute__((vector_size (32))); typedef _Float16 vnx8hf 
__attribute__((vector_size (32))); -typedef float vnx4sf __attribute__((vector_size (32))); /* Predicate vector: 1 0 0 0 ... */ @@ -20,22 +19,39 @@ #define MASK_32 { 0, 33, 34, 35, 4, 37, #define MASK_16 {0, 17, 2, 19, 4, 21, 6, 23, 8, 25, 10, 27, 12, 29, 14, 31} -#define INDEX_32 vnx16qi -#define INDEX_16 vnx8hi - -#define PERMUTE(type, nunits) \ -type permute_##type (type x, type y) \ -{ \ - return __builtin_shuffle (x, y, (INDEX_##nunits) MASK_##nunits); \ +/* +** permute_vnx16qi: +** ptrue (p[0-7])\.s, vl8 +** sel z0\.b, \1, z0\.b, z1\.b +** ret +*/ +__SVInt8_t +permute_vnx16qi (__SVInt8_t x, __SVInt8_t y) +{ + return __builtin_shuffle ((vnx16qi) x, (vnx16qi) y, (vnx16qi) MASK_32); } -PERMUTE(vnx16qi, 32) -PERMUTE(vnx8hi, 16) -PERMUTE(vnx8hf, 16) - -/* { dg-final { scan-assembler-not {\ttbl\t} } } */ - -/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.b, p[0-9]+, z[0-9]+\.b, z[0-9]+\.b\n} 1 } } */ -/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.h, p[0-9]+, z[0-9]+\.h, z[0-9]+\.h\n} 2 } } */ +/* +** permute_vnx8hi: +** ptrue (p[0-7])\.s, vl8 +** sel z0\.h, \1, z0\.h, z1\.h +** ret +*/ +__SVInt16_t +permute_vnx8hi (__SVInt16_t x, __SVInt16_t y) +{ + return __builtin_shuffle ((vnx8hi) x, (vnx8hi) y, (vnx8hi) MASK_16); +} -/* { dg-final { scan-assembler-times {\tptrue\tp[0-9]+\.s, vl8\n} 3 } } */ +/* +** permute_vnx8hf: +** ptrue (p[0-7])\.s, vl8 +** sel z0\.h, \1, z0\.h, z1\.h +** ret +*/ +__SVFloat16_t +permute_vnx8hf (__SVFloat16_t x, __SVFloat16_t y) +{ + return (__SVFloat16_t) __builtin_shuffle ((vnx8hf) x, (vnx8hf) y, + (vnx8hi) MASK_16); +} Index: gcc/testsuite/gcc.target/aarch64/sve/sel_3.c =================================================================== --- gcc/testsuite/gcc.target/aarch64/sve/sel_3.c 2019-08-25 19:10:33.442172145 +0100 +++ gcc/testsuite/gcc.target/aarch64/sve/sel_3.c 2020-01-10 16:30:07.989668567 +0000 @@ -1,5 +1,6 @@ /* { dg-do assemble { target aarch64_asm_sve_ok } } */ /* { dg-options "-O2 -msve-vector-bits=256 
--save-temps" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include <stdint.h> @@ -25,26 +26,63 @@ #define MASK_16 { 0, 17, 18, 19, 4, 21, #define MASK_8 { 0, 9, 2, 11, 4, 13, 6, 15 } -#define INDEX_32 vnx16qi -#define INDEX_16 vnx8hi -#define INDEX_8 vnx4si - -#define PERMUTE(type, nunits) \ -type permute_##type (type x, type y) \ -{ \ - return __builtin_shuffle (x, y, (INDEX_##nunits) MASK_##nunits); \ +/* +** permute_vnx16qi: +** ptrue (p[0-7])\.d, vl4 +** sel z0\.b, \1, z0\.b, z1\.b +** ret +*/ +__SVInt8_t +permute_vnx16qi (__SVInt8_t x, __SVInt8_t y) +{ + return __builtin_shuffle ((vnx16qi) x, (vnx16qi) y, (vnx16qi) MASK_32); } -PERMUTE(vnx16qi, 32) -PERMUTE(vnx8hi, 16) -PERMUTE(vnx4si, 8) -PERMUTE(vnx8hf, 16) -PERMUTE(vnx4sf, 8) - -/* { dg-final { scan-assembler-not {\ttbl\t} } } */ - -/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.b, p[0-9]+, z[0-9]+\.b, z[0-9]+\.b\n} 1 } } */ -/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.h, p[0-9]+, z[0-9]+\.h, z[0-9]+\.h\n} 2 } } */ -/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.s, p[0-9]+, z[0-9]+\.s, z[0-9]+\.s\n} 2 } } */ +/* +** permute_vnx8hi: +** ptrue (p[0-7])\.d, vl4 +** sel z0\.h, \1, z0\.h, z1\.h +** ret +*/ +__SVInt16_t +permute_vnx8hi (__SVInt16_t x, __SVInt16_t y) +{ + return __builtin_shuffle ((vnx8hi) x, (vnx8hi) y, (vnx8hi) MASK_16); +} + +/* +** permute_vnx4si: +** ptrue (p[0-7])\.d, vl4 +** sel z0\.s, \1, z0\.s, z1\.s +** ret +*/ +__SVInt32_t +permute_vnx4si (__SVInt32_t x, __SVInt32_t y) +{ + return __builtin_shuffle ((vnx4si) x, (vnx4si) y, (vnx4si) MASK_8); +} -/* { dg-final { scan-assembler-times {\tptrue\tp[0-9]+\.d, vl4\n} 5 } } */ +/* +** permute_vnx8hf: +** ptrue (p[0-7])\.d, vl4 +** sel z0\.h, \1, z0\.h, z1\.h +** ret +*/ +__SVFloat16_t +permute_vnx8hf (__SVFloat16_t x, __SVFloat16_t y) +{ + return (__SVFloat16_t) __builtin_shuffle ((vnx8hf) x, (vnx8hf) y, + (vnx8hi) MASK_16); +} + +/* +** permute_vnx4sf: +** ptrue (p[0-7])\.d, vl4 +** sel z0\.s, \1, z0\.s, 
z1\.s +** ret +*/ +__SVFloat32_t +permute_vnx4sf (__SVFloat32_t x, __SVFloat32_t y) +{ + return __builtin_shuffle ((vnx4sf) x, (vnx4sf) y, (vnx4si) MASK_8); +}