On Wed, Nov 30, 2022 at 9:44 AM Richard Biener <richard.guent...@gmail.com> wrote:
>
> On Tue, Nov 29, 2022 at 11:05 AM Manolis Tsamis <manolis.tsa...@vrull.eu> wrote:
> >
> > When using SWAR (SIMD in a register) techniques a comparison operation within
> > such a register can be made by using a combination of shifts, bitwise and and
> > multiplication.  If code using this scheme is vectorized then there is potential
> > to replace all these operations with a single vector comparison, by reinterpreting
> > the vector types to match the width of the SWAR register.
> >
> > For example, for the test function packed_cmp_16_32, the original generated
> > code is:
> >
> >         ldr     q0, [x0]
> >         add     w1, w1, 1
> >         ushr    v0.4s, v0.4s, 15
> >         and     v0.16b, v0.16b, v2.16b
> >         shl     v1.4s, v0.4s, 16
> >         sub     v0.4s, v1.4s, v0.4s
> >         str     q0, [x0], 16
> >         cmp     w2, w1
> >         bhi     .L20
> >
> > with this pattern the above can be optimized to:
> >
> >         ldr     q0, [x0]
> >         add     w1, w1, 1
> >         cmlt    v0.8h, v0.8h, #0
> >         str     q0, [x0], 16
> >         cmp     w2, w1
> >         bhi     .L20
> >
> > The effect is similar for x86-64.
> >
> > Signed-off-by: Manolis Tsamis <manolis.tsa...@vrull.eu>
> >
> > gcc/ChangeLog:
> >
> >         * match.pd: Simplify vector shift + bit_and + multiply in some cases.
> >
> > gcc/testsuite/ChangeLog:
> >
> >         * gcc.target/aarch64/swar_to_vec_cmp.c: New test.
> >
> > ---
> >
> > Changes in v2:
> >         - Changed pattern to use vec_cond_expr.
> >         - Changed pattern to work with VLA vector.
> >         - Added more checks and comments.
> >
> >  gcc/match.pd                                  | 60 ++++++++++++++++
> >  .../gcc.target/aarch64/swar_to_vec_cmp.c      | 72 +++++++++++++++++++
> >  2 files changed, 132 insertions(+)
> >  create mode 100644 gcc/testsuite/gcc.target/aarch64/swar_to_vec_cmp.c
> >
> > diff --git a/gcc/match.pd b/gcc/match.pd
> > index 67a0a682f31..05e7fc79ba8 100644
> > --- a/gcc/match.pd
> > +++ b/gcc/match.pd
> > @@ -301,6 +301,66 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
> >     (view_convert (bit_and:itype (view_convert @0)
> >                                  (ne @1 { build_zero_cst (type); })))))))
> >
> > +/* In SWAR (SIMD in a register) code a signed comparison of packed data can
> > +   be constructed with a particular combination of shift, bitwise and,
> > +   and multiplication by constants.  If that code is vectorized we can
> > +   convert this pattern into a more efficient vector comparison.  */
> > +(simplify
> > + (mult (bit_and (rshift @0 uniform_integer_cst_p@1)
> > +                uniform_integer_cst_p@2)
> > +       uniform_integer_cst_p@3)
>
> Please use VECTOR_CST in the match instead of uniform_integer_cst_p
> and instead ...
>
> > + (with {
> > +    tree rshift_cst = uniform_integer_cst_p (@1);
> > +    tree bit_and_cst = uniform_integer_cst_p (@2);
> > +    tree mult_cst = uniform_integer_cst_p (@3);
> > +  }
> > +  /* Make sure we're working with vectors and uniform vector constants.  */
> > +  (if (VECTOR_TYPE_P (type)
>
> ... test for non-NULL *_cst here where you can use uniform_vector_p instead
> of uniform_integer_cst_p.  You can elide the VECTOR_TYPE_P check then
> and instead do INTEGRAL_TYPE_P (TREE_TYPE (type)).
>
I implemented this solution, but it didn't match the pattern.  After
re-examining the GIMPLE code I remembered that it looks like this:

  vect__5.151_72 = MEM <vector(4) unsigned intD.10> [(uint32_tD.4389 *)_58];
  vect__38.152_73 = vect__5.151_72 >> 15;
  vect__39.153_74 = vect__38.152_73 & { 65537, 65537, 65537, 65537 };
  vect__40.154_75 = vect__39.153_74 * { 65535, 65535, 65535, 65535 };

Even though all the operations are on vectors, the shift amount is not a
VECTOR_CST.  I don't know whether this is guaranteed to be the case, i.e.
whether vector shifts always take a single scalar constant.  I used
uniform_integer_cst_p to handle this difference between the constants
involved without making an assumption about the shift being a single
constant.  Is there a better way to deal with this subtlety?

Manolis

>
> > +       && tree_fits_uhwi_p (rshift_cst)
> > +       && tree_fits_uhwi_p (mult_cst)
> > +       && tree_fits_uhwi_p (bit_and_cst))
> > +   /* Compute what constants would be needed for this to represent a packed
> > +      comparison based on the shift amount denoted by RSHIFT_CST.  */
> > +   (with {
> > +      HOST_WIDE_INT vec_elem_bits = vector_element_bits (type);
> > +      poly_int64 vec_nelts = TYPE_VECTOR_SUBPARTS (type);
> > +      poly_int64 vec_bits = vec_elem_bits * vec_nelts;
> > +
> > +      unsigned HOST_WIDE_INT cmp_bits_i, bit_and_i, mult_i;
> > +      unsigned HOST_WIDE_INT target_mult_i, target_bit_and_i;
> > +      cmp_bits_i = tree_to_uhwi (rshift_cst) + 1;
> > +      target_mult_i = (HOST_WIDE_INT_1U << cmp_bits_i) - 1;
> > +
> > +      mult_i = tree_to_uhwi (mult_cst);
> > +      bit_and_i = tree_to_uhwi (bit_and_cst);
> > +      target_bit_and_i = 0;
> > +
> > +      /* The bit pattern in BIT_AND_I should be a mask for the least
> > +         significant bit of each packed element that is CMP_BITS wide.  */
> > +      for (unsigned i = 0; i < vec_elem_bits / cmp_bits_i; i++)
> > +        target_bit_and_i = (target_bit_and_i << cmp_bits_i) | 1U;
> > +    }
> > +    (if ((exact_log2 (cmp_bits_i)) >= 0
> > +         && cmp_bits_i < HOST_BITS_PER_WIDE_INT
> > +         && multiple_p (vec_bits, cmp_bits_i)
> > +         && vec_elem_bits <= HOST_BITS_PER_WIDE_INT
> > +         && target_mult_i == mult_i
> > +         && target_bit_and_i == bit_and_i)
> > +     /* Compute the vector shape for the comparison and check if the target is
> > +        able to expand the comparison with that type.  */
> > +     (with {
> > +        /* We're doing a signed comparison.  */
> > +        tree cmp_type = build_nonstandard_integer_type (cmp_bits_i, 0);
> > +        poly_int64 vector_type_nelts = exact_div (vec_bits, cmp_bits_i);
> > +        tree vector_cmp_type = build_vector_type (cmp_type, vector_type_nelts);
> > +        tree zeros = build_zero_cst (vector_cmp_type);
> > +        tree ones = build_all_ones_cst (vector_cmp_type);
> > +      }
> > +      (if (expand_vec_cmp_expr_p (vector_cmp_type, vector_cmp_type, LT_EXPR))
> > +       (view_convert:type (vec_cond (lt (view_convert:vector_cmp_type @0)
> > +                                         { zeros; })
> > +                                     { ones; } { zeros; })))))))))
>
> You are testing whether we can expand the comparison but not whether we can
> expand the vector condition here?  The set of combinations supported are
> tricky, I think your test is conservatively OK but it might fail to match AVX512
> and will fail targets with masks (SVE, GCN?).  I think adding
>
>   || expand_vec_cond_expr_p (vector_cmp_type, vector_cmp_type, LT_EXPR)
>
> would fix that (but there will be extra cost in producing the final result vector
> from the comparison mask).
>
> I do wonder if, as this is targeted at vectorization, this shouldn't
> be a vectorizer pattern instead of a post-processing transform?
> That way we would get the costing in the vectorizer correct and not rely on
> integer vector shift or multiplication and also get the cost of producing the
> result vector correct?  I can't fully decipher the trick though, but assume
> that vector_cmp_type always has smaller elements than 'type'.
>
> Thanks,
> Richard.
>
> > +
> >  (for cmp (gt  ge  lt  le)
> >       outp (convert convert negate negate)
> >       outn (negate negate convert convert)
> > diff --git a/gcc/testsuite/gcc.target/aarch64/swar_to_vec_cmp.c b/gcc/testsuite/gcc.target/aarch64/swar_to_vec_cmp.c
> > new file mode 100644
> > index 00000000000..26f9ad9ef28
> > --- /dev/null
> > +++ b/gcc/testsuite/gcc.target/aarch64/swar_to_vec_cmp.c
> > @@ -0,0 +1,72 @@
> > +/* { dg-do compile } */
> > +/* { dg-options "-O2 -ftree-vectorize" } */
> > +
> > +typedef unsigned char uint8_t;
> > +typedef unsigned short uint16_t;
> > +typedef unsigned int uint32_t;
> > +
> > +/* 8-bit SWAR tests.  */
> > +
> > +static uint8_t packed_cmp_8_8(uint8_t a)
> > +{
> > +  return ((a >> 7) & 0x1U) * 0xffU;
> > +}
> > +
> > +/* 16-bit SWAR tests.  */
> > +
> > +static uint16_t packed_cmp_8_16(uint16_t a)
> > +{
> > +  return ((a >> 7) & 0x101U) * 0xffU;
> > +}
> > +
> > +static uint16_t packed_cmp_16_16(uint16_t a)
> > +{
> > +  return ((a >> 15) & 0x1U) * 0xffffU;
> > +}
> > +
> > +/* 32-bit SWAR tests.  */
> > +
> > +static uint32_t packed_cmp_8_32(uint32_t a)
> > +{
> > +  return ((a >> 7) & 0x1010101U) * 0xffU;
> > +}
> > +
> > +static uint32_t packed_cmp_16_32(uint32_t a)
> > +{
> > +  return ((a >> 15) & 0x10001U) * 0xffffU;
> > +}
> > +
> > +static uint32_t packed_cmp_32_32(uint32_t a)
> > +{
> > +  return ((a >> 31) & 0x1U) * 0xffffffffU;
> > +}
> > +
> > +/* Driver function to test the vectorized code generated for the different
> > +   packed_cmp variants.  */
> > +
> > +#define VECTORIZED_PACKED_CMP(T, FUNC)          \
> > +  void vectorized_cmp_##FUNC(T* a, int n)       \
> > +  {                                             \
> > +    n = (n / 32) * 32;                          \
> > +    for(int i = 0; i < n; i += 4)               \
> > +      {                                         \
> > +        a[i + 0] = FUNC(a[i + 0]);              \
> > +        a[i + 1] = FUNC(a[i + 1]);              \
> > +        a[i + 2] = FUNC(a[i + 2]);              \
> > +        a[i + 3] = FUNC(a[i + 3]);              \
> > +      }                                         \
> > +  }
> > +
> > +VECTORIZED_PACKED_CMP(uint8_t, packed_cmp_8_8);
> > +
> > +VECTORIZED_PACKED_CMP(uint16_t, packed_cmp_8_16);
> > +VECTORIZED_PACKED_CMP(uint16_t, packed_cmp_16_16);
> > +
> > +VECTORIZED_PACKED_CMP(uint32_t, packed_cmp_8_32);
> > +VECTORIZED_PACKED_CMP(uint32_t, packed_cmp_16_32);
> > +VECTORIZED_PACKED_CMP(uint32_t, packed_cmp_32_32);
> > +
> > +/* { dg-final { scan-assembler {\tcmlt\t} } } */
> > +/* { dg-final { scan-assembler-not {\tushr\t} } } */
> > +/* { dg-final { scan-assembler-not {\tshl\t} } } */
> > +/* { dg-final { scan-assembler-not {\tmul\t} } } */
> > --
> > 2.34.1
> >
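
For readers trying to follow the trick Richard refers to above, here is a
minimal, standalone scalar sketch of the SWAR identity the pattern targets
(not part of the patch; it assumes 16-bit lanes packed in a 32-bit word as in
packed_cmp_16_32, and the helper names and test driver below are made up for
the illustration):

  #include <assert.h>
  #include <stdint.h>

  /* The SWAR form the pattern matches: shift each lane's sign bit down to
     the lane's least significant bit, mask those bits, and multiply by the
     all-ones lane value (0xffff).  */
  static uint32_t swar_cmp_16_32 (uint32_t a)
  {
    return ((a >> 15) & 0x10001U) * 0xffffU;
  }

  /* The per-lane signed comparison it is rewritten to:
     lane < 0 ? 0xffff : 0 for each 16-bit lane (what cmlt computes).  */
  static uint32_t lanewise_cmp_16_32 (uint32_t a)
  {
    uint16_t lo = (uint16_t) a;
    uint16_t hi = (uint16_t) (a >> 16);
    uint32_t lo_res = (int16_t) lo < 0 ? 0xffffU : 0;
    uint32_t hi_res = (int16_t) hi < 0 ? 0xffffU : 0;
    return lo_res | (hi_res << 16);
  }

  int main (void)
  {
    uint32_t tests[] = { 0x00000000U, 0x00008000U, 0x80000000U,
                         0x80008000U, 0x7fff7fffU, 0xdeadbeefU };
    for (unsigned i = 0; i < sizeof tests / sizeof tests[0]; i++)
      assert (swar_cmp_16_32 (tests[i]) == lanewise_cmp_16_32 (tests[i]));
    return 0;
  }

The multiply cannot carry across lanes here because 0x10001 * 0xffff still
fits each lane exactly, which is why the shift/and/multiply sequence is
equivalent to a packed "less than zero" comparison.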