[Resending this with the patch compressed as it's more than 400 KB...]

Hi all,

This patch converts a number of multi-choice patterns within the
aarch64 backend to the new syntax.

The list of the converted patterns is in the ChangeLog below.
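
For illustration, here is a minimal before/after sketch of the kind
of rewrite involved (a simplified, made-up mov pattern, not lifted
verbatim from the patch):

Old multi-choice syntax:

  (define_insn "*example_mov"
    [(set (match_operand:SI 0 "register_operand" "=r,r")
          (match_operand:SI 1 "aarch64_mov_operand" "r,M"))]
    ""
    "@
     mov\t%w0, %w1
     mov\t%w0, %1"
    [(set_attr "type" "mov_reg,mov_imm")]
  )

New compact syntax, with the constraints and per-alternative
attributes moved into one table next to the asm templates:

  (define_insn "*example_mov"
    [(set (match_operand:SI 0 "register_operand")
          (match_operand:SI 1 "aarch64_mov_operand"))]
    ""
    {@ [ cons: =0 , 1 ; attrs: type ]
       [ r        , r ; mov_reg     ] mov\t%w0, %w1
       [ r        , M ; mov_imm     ] mov\t%w0, %1
    }
  )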

For completeness, here follows the list of multi-choice patterns that
were rejected for conversion by my parser; they typically have some C
code as asm output and require manual intervention (see the sketch
after the list):
aarch64_simd_vec_set<mode>, aarch64_get_lane<mode>,
aarch64_cm<optab>di, aarch64_cm<optab>di, aarch64_cmtstdi,
*aarch64_movv8di, *aarch64_be_mov<mode>, *aarch64_be_movci,
*aarch64_be_mov<mode>, *aarch64_be_movxi, *aarch64_sve_mov<mode>_le,
*aarch64_sve_mov<mode>_be, @aarch64_pred_mov<mode>,
@aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx4SI_ONLY:mode>,
@aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>,
*aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>_sxtw,
*aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>_uxtw,
@aarch64_vec_duplicate_vq<mode>_le, *vec_extract<mode><Vel>_0,
*vec_extract<mode><Vel>_v128, *cmp<cmp_op><mode>_and,
*fcm<cmp_op><mode>_and_combine, @aarch64_sve_ext<mode>,
@aarch64_sve2_<su>aba<mode>, *sibcall_insn, *sibcall_value_insn,
*xor_one_cmpl<mode>3, *insv_reg<mode>_<SUBDI_BITS>,
*aarch64_bfi<GPI:mode><ALLX:mode>_<SUBDI_BITS>,
*aarch64_bfidi<ALLX:mode>_subreg_<SUBDI_BITS>, *aarch64_bfxil<mode>,
*aarch64_bfxilsi_uxtw,
*aarch64_<su_optab>cvtf<fcvt_target><GPF:mode>2_mult,
atomic_store<mode>.
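
To make it concrete why these are skipped, here is a hypothetical
sketch (not one of the patterns above) of the shape the parser
rejects: the output is a C block rather than a fixed template per
alternative, so there is no mechanical mapping from each constraint
alternative to one asm string:

  (define_insn "*example_cmeq"
    [(set (match_operand:DI 0 "register_operand" "=r,w")
          (neg:DI (eq:DI (match_operand:DI 1 "register_operand" "r,w")
                         (match_operand:DI 2 "register_operand" "r,w"))))]
    ""
    {
      /* Illustrative only: alternative 1 has a fixed template, but
         alternative 0 is assembled by C code at output time, so the
         alternatives cannot be rewritten one-per-row.  */
      if (which_alternative == 1)
        return "cmeq\t%d0, %d1, %d2";
      output_asm_insn ("cmp\t%x1, %x2", operands);
      return "csetm\t%x0, eq";
    }
    [(set_attr "type" "multiple,neon_compare")]
  )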

Bootstrapped and regression tested on aarch64-unknown-linux-gnu. I
also analysed tmp-mddump.md (from 'make mddump') and could not find
any effective differences. Okay for trunk?

Best regards,

  Andrea

gcc/ChangeLog:

        * config/aarch64/aarch64.md (@ccmp<CC_ONLY:mode><GPI:mode>)
        (@ccmp<CC_ONLY:mode><GPI:mode>_rev, *call_insn, *call_value_insn)
        (*mov<mode>_aarch64, load_pair_sw_<SX:mode><SX2:mode>)
        (load_pair_dw_<DX:mode><DX2:mode>)
        (store_pair_sw_<SX:mode><SX2:mode>)
        (store_pair_dw_<DX:mode><DX2:mode>, *extendsidi2_aarch64)
        (*zero_extendsidi2_aarch64, *load_pair_zero_extendsidi2_aarch64)
        (*extend<SHORT:mode><GPI:mode>2_aarch64)
        (*zero_extend<SHORT:mode><GPI:mode>2_aarch64)
        (*extendqihi2_aarch64, *zero_extendqihi2_aarch64)
        (*add<mode>3_aarch64, *addsi3_aarch64_uxtw, *add<mode>3_poly_1)
        (add<mode>3_compare0, *addsi3_compare0_uxtw)
        (*add<mode>3_compareC_cconly, add<mode>3_compareC)
        (*add<mode>3_compareV_cconly_imm, add<mode>3_compareV_imm)
        (*add<mode>3nr_compare0, subdi3, subv<GPI:mode>_imm)
        (*cmpv<GPI:mode>_insn, sub<mode>3_compare1_imm, neg<mode>2)
        (cmp<mode>, fcmp<mode>, fcmpe<mode>, *cmov<mode>_insn)
        (*cmovsi_insn_uxtw, <optab><mode>3, *<optab>si3_uxtw)
        (*and<mode>3_compare0, *andsi3_compare0_uxtw, one_cmpl<mode>2)
        (*<NLOGICAL:optab>_one_cmpl<mode>3, *and<mode>3nr_compare0)
        (*aarch64_ashl_sisd_or_int_<mode>3)
        (*aarch64_lshr_sisd_or_int_<mode>3)
        (*aarch64_ashr_sisd_or_int_<mode>3, *ror<mode>3_insn)
        (*<optab>si3_insn_uxtw, <optab>_trunc<fcvt_target><GPI:mode>2)
        (<optab><fcvt_target><GPF:mode>2)
        (<FCVT_F2FIXED:fcvt_fixed_insn><GPF:mode>3)
        (<FCVT_FIXED2F:fcvt_fixed_insn><GPI:mode>3)
        (*aarch64_<optab><mode>3_cssc, copysign<GPF:mode>3_insn): Update
        to new syntax.

        * config/aarch64/aarch64-sve2.md (@aarch64_scatter_stnt<mode>)
        (@aarch64_scatter_stnt_<SVE_FULL_SDI:mode><SVE_PARTIAL_I:mode>)
        (*aarch64_mul_unpredicated_<mode>)
        (@aarch64_pred_<sve_int_op><mode>, *cond_<sve_int_op><mode>_2)
        (*cond_<sve_int_op><mode>_3, *cond_<sve_int_op><mode>_any)
        (*cond_<sve_int_op><mode>_z, @aarch64_pred_<sve_int_op><mode>)
        (*cond_<sve_int_op><mode>_2, *cond_<sve_int_op><mode>_3)
        (*cond_<sve_int_op><mode>_any, @aarch64_sve_<sve_int_op><mode>)
        (@aarch64_sve_<sve_int_op>_lane_<mode>)
        (@aarch64_sve_add_mul_lane_<mode>)
        (@aarch64_sve_sub_mul_lane_<mode>, @aarch64_sve2_xar<mode>)
        (*aarch64_sve2_bcax<mode>, @aarch64_sve2_eor3<mode>)
        (*aarch64_sve2_nor<mode>, *aarch64_sve2_nand<mode>)
        (*aarch64_sve2_bsl<mode>, *aarch64_sve2_nbsl<mode>)
        (*aarch64_sve2_bsl1n<mode>, *aarch64_sve2_bsl2n<mode>)
        (*aarch64_sve2_sra<mode>, @aarch64_sve_add_<sve_int_op><mode>)
        (*aarch64_sve2_<su>aba<mode>, @aarch64_sve_add_<sve_int_op><mode>)
        (@aarch64_sve_add_<sve_int_op>_lane_<mode>)
        (@aarch64_sve_qadd_<sve_int_op><mode>)
        (@aarch64_sve_qadd_<sve_int_op>_lane_<mode>)
        (@aarch64_sve_sub_<sve_int_op><mode>)
        (@aarch64_sve_sub_<sve_int_op>_lane_<mode>)
        (@aarch64_sve_qsub_<sve_int_op><mode>)
        (@aarch64_sve_qsub_<sve_int_op>_lane_<mode>)
        (@aarch64_sve_<sve_fp_op><mode>, @aarch64_<sve_fp_op>_lane_<mode>)
        (@aarch64_pred_<sve_int_op><mode>)
        (@aarch64_pred_<sve_fp_op><mode>, *cond_<sve_int_op><mode>_2)
        (*cond_<sve_int_op><mode>_z, @aarch64_sve_<optab><mode>)
        (@aarch64_<optab>_lane_<mode>, @aarch64_sve_<optab><mode>)
        (@aarch64_<optab>_lane_<mode>, @aarch64_pred_<sve_fp_op><mode>)
        (*cond_<sve_fp_op><mode>_any_relaxed)
        (*cond_<sve_fp_op><mode>_any_strict)
        (@aarch64_pred_<sve_int_op><mode>, *cond_<sve_int_op><mode>)
        (@aarch64_pred_<sve_fp_op><mode>, *cond_<sve_fp_op><mode>)
        (*cond_<sve_fp_op><mode>_strict): Update to new syntax.

        * config/aarch64/aarch64-sve.md (*aarch64_sve_mov<mode>_ldr_str)
        (*aarch64_sve_mov<mode>_no_ldr_str, @aarch64_pred_mov<mode>)
        (*aarch64_sve_mov<mode>, aarch64_wrffr)
        (mask_scatter_store<mode><v_int_container>)
        (*mask_scatter_store<mode><v_int_container>_<su>xtw_unpacked)
        (*mask_scatter_store<mode><v_int_container>_sxtw)
        (*mask_scatter_store<mode><v_int_container>_uxtw)
        (@aarch64_scatter_store_trunc<VNx4_NARROW:mode><VNx4_WIDE:mode>)
        (@aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>)
        (*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_sxtw)
        (*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_uxtw)
        (*vec_duplicate<mode>_reg, vec_shl_insert_<mode>)
        (vec_series<mode>, @extract_<last_op>_<mode>)
        (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2)
        (*cond_<optab><mode>_any, @aarch64_pred_<optab><mode>)
        (@aarch64_sve_revbhw_<SVE_ALL:mode><PRED_HSD:mode>)
        (@cond_<optab><mode>)
        (*<optab><SVE_PARTIAL_I:mode><SVE_HSDI:mode>2)
        (@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>)
        (@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>)
        (*cond_uxt<mode>_2, *cond_uxt<mode>_any, *cnot<mode>)
        (*cond_cnot<mode>_2, *cond_cnot<mode>_any)
        (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_relaxed)
        (*cond_<optab><mode>_2_strict, *cond_<optab><mode>_any_relaxed)
        (*cond_<optab><mode>_any_strict, @aarch64_pred_<optab><mode>)
        (*cond_<optab><mode>_2, *cond_<optab><mode>_3)
        (*cond_<optab><mode>_any, add<mode>3, sub<mode>3)
        (@aarch64_pred_<su>abd<mode>, *aarch64_cond_<su>abd<mode>_2)
        (*aarch64_cond_<su>abd<mode>_3, *aarch64_cond_<su>abd<mode>_any)
        (@aarch64_sve_<optab><mode>, @aarch64_pred_<optab><mode>)
        (*cond_<optab><mode>_2, *cond_<optab><mode>_z)
        (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2)
        (*cond_<optab><mode>_3, *cond_<optab><mode>_any, <optab><mode>3)
        (*cond_bic<mode>_2, *cond_bic<mode>_any)
        (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_const)
        (*cond_<optab><mode>_any_const, *cond_<sve_int_op><mode>_m)
        (*cond_<sve_int_op><mode>_z, *sdiv_pow2<mode>3)
        (*cond_<sve_int_op><mode>_2, *cond_<sve_int_op><mode>_any)
        (@aarch64_pred_<optab><mode>, *cond_<optab><mode>_2_relaxed)
        (*cond_<optab><mode>_2_strict, *cond_<optab><mode>_any_relaxed)
        (*cond_<optab><mode>_any_strict, @aarch64_pred_<optab><mode>)
        (*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict)
        (*cond_<optab><mode>_2_const_relaxed)
        (*cond_<optab><mode>_2_const_strict)
        (*cond_<optab><mode>_3_relaxed, *cond_<optab><mode>_3_strict)
        (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
        (*cond_<optab><mode>_any_const_relaxed)
        (*cond_<optab><mode>_any_const_strict)
        (@aarch64_pred_<optab><mode>, *cond_add<mode>_2_const_relaxed)
        (*cond_add<mode>_2_const_strict)
        (*cond_add<mode>_any_const_relaxed)
        (*cond_add<mode>_any_const_strict, @aarch64_pred_<optab><mode>)
        (*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict)
        (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
        (@aarch64_pred_<optab><mode>, *cond_sub<mode>_3_const_relaxed)
        (*cond_sub<mode>_3_const_strict, *cond_sub<mode>_const_relaxed)
        (*cond_sub<mode>_const_strict, *aarch64_pred_abd<mode>_relaxed)
        (*aarch64_pred_abd<mode>_strict)
        (*aarch64_cond_abd<mode>_2_relaxed)
        (*aarch64_cond_abd<mode>_2_strict)
        (*aarch64_cond_abd<mode>_3_relaxed)
        (*aarch64_cond_abd<mode>_3_strict)
        (*aarch64_cond_abd<mode>_any_relaxed)
        (*aarch64_cond_abd<mode>_any_strict, @aarch64_pred_<optab><mode>)
        (@aarch64_pred_fma<mode>, *cond_fma<mode>_2, *cond_fma<mode>_4)
        (*cond_fma<mode>_any, @aarch64_pred_fnma<mode>)
        (*cond_fnma<mode>_2, *cond_fnma<mode>_4, *cond_fnma<mode>_any)
        (<sur>dot_prod<vsi2qi>, @aarch64_<sur>dot_prod_lane<vsi2qi>)
        (@<sur>dot_prod<vsi2qi>, @aarch64_<sur>dot_prod_lane<vsi2qi>)
        (@aarch64_sve_add_<optab><vsi2qi>, @aarch64_pred_<optab><mode>)
        (*cond_<optab><mode>_2_relaxed, *cond_<optab><mode>_2_strict)
        (*cond_<optab><mode>_4_relaxed, *cond_<optab><mode>_4_strict)
        (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
        (@aarch64_<optab>_lane_<mode>, @aarch64_pred_<optab><mode>)
        (*cond_<optab><mode>_4_relaxed, *cond_<optab><mode>_4_strict)
        (*cond_<optab><mode>_any_relaxed, *cond_<optab><mode>_any_strict)
        (@aarch64_<optab>_lane_<mode>, @aarch64_sve_tmad<mode>)
        (@aarch64_sve_<sve_fp_op>vnx4sf)
        (@aarch64_sve_<sve_fp_op>_lanevnx4sf)
        (@aarch64_sve_<sve_fp_op><mode>, *vcond_mask_<mode><vpred>)
        (@aarch64_sel_dup<mode>, @aarch64_pred_cmp<cmp_op><mode>)
        (*cmp<cmp_op><mode>_cc, *cmp<cmp_op><mode>_ptest)
        (@aarch64_pred_fcm<cmp_op><mode>, @fold_extract_<last_op>_<mode>)
        (@aarch64_fold_extract_vector_<last_op>_<mode>)
        (@aarch64_sve_splice<mode>)
        (@aarch64_sve_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>)
        (@aarch64_sve_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>)
        (*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_relaxed)
        (*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>_strict)
        (*cond_<optab>_trunc<VNx2DF_ONLY:mode><VNx4SI_ONLY:mode>)
        (@aarch64_sve_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>)
        (@aarch64_sve_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>)
        (*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_relaxed)
        (*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>_strict)
        (*cond_<optab>_extend<VNx4SI_ONLY:mode><VNx2DF_ONLY:mode>)
        (@aarch64_sve_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>)
        (*cond_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>)
        (@aarch64_sve_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>)
        (*cond_<optab>_trunc<VNx4SF_ONLY:mode><VNx8BF_ONLY:mode>)
        (@aarch64_sve_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>)
        (*cond_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>)
        (@aarch64_brk<brk_op>, *aarch64_sve_<inc_dec><mode>_cntp): Update
        to new syntax.

        * config/aarch64/aarch64-simd.md (aarch64_simd_dup<mode>)
        (load_pair<DREG:mode><DREG2:mode>)
        (vec_store_pair<DREG:mode><DREG2:mode>, aarch64_simd_stp<mode>)
        (aarch64_simd_mov_from_<mode>low)
        (aarch64_simd_mov_from_<mode>high, and<mode>3<vczle><vczbe>)
        (ior<mode>3<vczle><vczbe>, aarch64_simd_ashr<mode><vczle><vczbe>)
        (aarch64_simd_bsl<mode>_internal<vczle><vczbe>)
        (*aarch64_simd_bsl<mode>_alt<vczle><vczbe>)
        (aarch64_simd_bsldi_internal, aarch64_simd_bsldi_alt)
        (store_pair_lanes<mode>, *aarch64_combine_internal<mode>)
        (*aarch64_combine_internal_be<mode>, *aarch64_combinez<mode>)
        (*aarch64_combinez_be<mode>)
        (aarch64_cm<optab><mode><vczle><vczbe>, *aarch64_cm<optab>di)
        (aarch64_cm<optab><mode><vczle><vczbe>, *aarch64_mov<mode>)
        (*aarch64_be_mov<mode>, *aarch64_be_movoi): Update to new syntax.

Attachment: 0003-aarch64-Convert-aarch64-multi-choice-patterns-to-new.patch.gz (application/gzip)
