On Thu, Dec 17, 2015 at 03:36:40PM +0000, Kyrill Tkachov wrote:
> 2015-12-17  Kyrylo Tkachov  <kyrylo.tkac...@arm.com>
>
>     PR rtl-optimization/68796
>     * config/aarch64/aarch64.md (*and<mode>3nr_compare0_zextract):
>     New pattern.
>     * config/aarch64/aarch64.c (aarch64_select_cc_mode): Handle
>     ZERO_EXTRACT comparison with zero.
>     (aarch64_mask_from_zextract_ops): New function.
>     * config/aarch64/aarch64-protos.h (aarch64_mask_from_zextract_ops):
>     New prototype.
>
> 2015-12-17  Kyrylo Tkachov  <kyrylo.tkac...@arm.com>
>
>     PR rtl-optimization/68796
>     * gcc.target/aarch64/tst_3.c: New test.
>     * gcc.target/aarch64/tst_4.c: Likewise.
Two comments.

> diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
> index 87d6eb1358845527d7068550925949802a7e48e2..febca98d38d5f09c97b0f79adc55bb29eca217b9 100644
> --- a/gcc/config/aarch64/aarch64-protos.h
> +++ b/gcc/config/aarch64/aarch64-protos.h
> @@ -330,6 +330,7 @@ int aarch64_uxt_size (int, HOST_WIDE_INT);
>  int aarch64_vec_fpconst_pow_of_2 (rtx);
>  rtx aarch64_final_eh_return_addr (void);
>  rtx aarch64_legitimize_reload_address (rtx *, machine_mode, int, int, int);
> +rtx aarch64_mask_from_zextract_ops (rtx, rtx);
>  const char *aarch64_output_move_struct (rtx *operands);
>  rtx aarch64_return_addr (int, rtx);
>  rtx aarch64_simd_gen_const_vector_dup (machine_mode, int);
> diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
> index cb8955d5d6c909e8179bb1ab8203eb165f55e4b6..58a9fc68f391162ed9847d7fb79d70d3ee9919f5 100644
> --- a/gcc/config/aarch64/aarch64.c
> +++ b/gcc/config/aarch64/aarch64.c
> @@ -4147,7 +4147,9 @@ aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y)
>        && y == const0_rtx
>        && (code == EQ || code == NE || code == LT || code == GE)
>        && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS || GET_CODE (x) == AND
> -          || GET_CODE (x) == NEG))
> +          || GET_CODE (x) == NEG
> +          || (GET_CODE (x) == ZERO_EXTRACT && CONST_INT_P (XEXP (x, 1))
> +              && CONST_INT_P (XEXP (x, 2)))))
>      return CC_NZmode;
>
>    /* A compare with a shifted operand.  Because of canonicalization,
> @@ -10757,6 +10759,21 @@ aarch64_simd_imm_zero_p (rtx x, machine_mode mode)
>    return x == CONST0_RTX (mode);
>  }
>
> +
> +/* Return the bitmask CONST_INT to select the bits required by a zero extract
> +   operation of width WIDTH at bit position POS.  */
> +
> +rtx
> +aarch64_mask_from_zextract_ops (rtx width, rtx pos)
> +{

It is up to you, but would this not more naturally be:

  unsigned HOST_WIDE_INT
  aarch64_mask_from_zextract_ops (rtx width, rtx pos)

given how it gets used elsewhere?

> +  gcc_assert (CONST_INT_P (width));
> +  gcc_assert (CONST_INT_P (pos));
> +
> +  unsigned HOST_WIDE_INT mask
> +    = ((unsigned HOST_WIDE_INT)1 << UINTVAL (width)) - 1;

Space between (unsigned HOST_WIDE_INT) and 1.

> +  return GEN_INT (mask << UINTVAL (pos));
> +}
> +
>  bool
>  aarch64_simd_imm_scalar_p (rtx x, machine_mode mode ATTRIBUTE_UNUSED)
>  {

Otherwise, this is OK.

Thanks,
James
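
For illustration, a minimal sketch of the alternative suggested above: the helper returns the raw mask value and callers wrap it in GEN_INT wherever an rtx is actually needed. The body follows the quoted patch; the caller shown is hypothetical.

  /* Return the bitmask selecting the bits covered by a zero_extract of
     width WIDTH at bit position POS.  Same computation as in the quoted
     patch, but returning the plain value instead of a CONST_INT rtx.  */

  unsigned HOST_WIDE_INT
  aarch64_mask_from_zextract_ops (rtx width, rtx pos)
  {
    gcc_assert (CONST_INT_P (width));
    gcc_assert (CONST_INT_P (pos));

    unsigned HOST_WIDE_INT mask
      = ((unsigned HOST_WIDE_INT) 1 << UINTVAL (width)) - 1;
    return mask << UINTVAL (pos);
  }

  /* Hypothetical caller that still wants an rtx:
       rtx mask_rtx
         = GEN_INT (aarch64_mask_from_zextract_ops (width_rtx, pos_rtx));
     E.g. for WIDTH = 4 and POS = 8 the computed mask is 0xf00.  */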