https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98856
--- Comment #14 from Jakub Jelinek <jakub at gcc dot gnu.org> ---
WIP that implements that.  Except that we need some permutation expansion
improvements, both for the SSE2 V4SImode permutation cases and for AVX2
V8SImode permutation cases.

--- gcc/config/i386/sse.md.jj	2021-02-05 14:32:44.175463716 +0100
+++ gcc/config/i386/sse.md	2021-02-05 18:49:29.621590903 +0100
@@ -12458,7 +12458,7 @@
    (set_attr "prefix" "orig,vex")
    (set_attr "mode" "<sseinsnmode>")])
 
-(define_insn "ashr<mode>3<mask_name>"
+(define_insn "<mask_codefor>ashr<mode>3<mask_name>"
   [(set (match_operand:VI248_AVX512BW_AVX512VL 0 "register_operand" "=v,v")
         (ashiftrt:VI248_AVX512BW_AVX512VL
           (match_operand:VI248_AVX512BW_AVX512VL 1 "nonimmediate_operand" "v,vm")
@@ -12472,6 +12472,125 @@
               (const_string "0")))
    (set_attr "mode" "<sseinsnmode>")])
 
+(define_expand "ashr<mode>3"
+  [(set (match_operand:VI248_AVX512BW 0 "register_operand")
+        (ashiftrt:VI248_AVX512BW
+          (match_operand:VI248_AVX512BW 1 "nonimmediate_operand")
+          (match_operand:DI 2 "nonmemory_operand")))]
+  "TARGET_AVX512F")
+
+(define_expand "ashrv4di3"
+  [(set (match_operand:V4DI 0 "register_operand")
+        (ashiftrt:V4DI
+          (match_operand:V4DI 1 "nonimmediate_operand")
+          (match_operand:DI 2 "nonmemory_operand")))]
+  "TARGET_AVX2"
+{
+  if (!TARGET_AVX512VL)
+    {
+      if (CONST_INT_P (operands[2]) && UINTVAL (operands[2]) >= 63)
+        {
+          rtx zero = force_reg (V4DImode, CONST0_RTX (V4DImode));
+          emit_insn (gen_avx2_gtv4di3 (operands[0], zero, operands[1]));
+          DONE;
+        }
+      if (operands[2] == const0_rtx)
+        {
+          emit_move_insn (operands[0], operands[1]);
+          DONE;
+        }
+      if (CONST_INT_P (operands[2]))
+        {
+          vec_perm_builder sel (8, 8, 1);
+          sel.quick_grow (8);
+          rtx arg0, arg1;
+          rtx op1 = lowpart_subreg (V8SImode, operands[1], V4DImode);
+          rtx target = gen_reg_rtx (V8SImode);
+          if (INTVAL (operands[2]) > 32)
+            {
+              arg0 = gen_reg_rtx (V8SImode);
+              arg1 = gen_reg_rtx (V8SImode);
+              emit_insn (gen_ashrv8si3 (arg1, op1, GEN_INT (31)));
+              emit_insn (gen_ashrv8si3 (arg0, op1,
+                                        GEN_INT (INTVAL (operands[2]) - 32)));
+              sel[0] = 1;
+              sel[1] = 9;
+              sel[2] = 3;
+              sel[3] = 11;
+              sel[4] = 5;
+              sel[5] = 13;
+              sel[6] = 7;
+              sel[7] = 15;
+            }
+          else if (INTVAL (operands[2]) == 32)
+            {
+              arg0 = op1;
+              arg1 = gen_reg_rtx (V8SImode);
+              emit_insn (gen_ashrv8si3 (arg1, op1, GEN_INT (31)));
+              sel[0] = 1;
+              sel[1] = 9;
+              sel[2] = 3;
+              sel[3] = 11;
+              sel[4] = 5;
+              sel[5] = 13;
+              sel[6] = 7;
+              sel[7] = 15;
+            }
+          else
+            {
+              arg0 = gen_reg_rtx (V4DImode);
+              arg1 = gen_reg_rtx (V8SImode);
+              emit_insn (gen_lshrv4di3 (arg0, operands[1], operands[2]));
+              emit_insn (gen_ashrv8si3 (arg1, op1, operands[2]));
+              arg0 = lowpart_subreg (V8SImode, arg0, V4DImode);
+              sel[0] = 0;
+              sel[1] = 9;
+              sel[2] = 2;
+              sel[3] = 11;
+              sel[4] = 4;
+              sel[5] = 13;
+              sel[6] = 6;
+              sel[7] = 15;
+            }
+          vec_perm_indices indices (sel, 2, 8);
+          bool ok = targetm.vectorize.vec_perm_const (V8SImode, target,
+                                                      arg0, arg1, indices);
+          gcc_assert (ok);
+          emit_move_insn (operands[0],
+                          lowpart_subreg (V4DImode, target, V8SImode));
+          DONE;
+        }
+
+      rtx zero = force_reg (V4DImode, CONST0_RTX (V4DImode));
+      rtx zero_or_all_ones = gen_reg_rtx (V4DImode);
+      emit_insn (gen_avx2_gtv4di3 (zero_or_all_ones, zero, operands[1]));
+      rtx lshr_res = gen_reg_rtx (V4DImode);
+      emit_insn (gen_lshrv4di3 (lshr_res, operands[1], operands[2]));
+      rtx ashl_res = gen_reg_rtx (V4DImode);
+      rtx amount;
+      if (TARGET_64BIT)
+        {
+          amount = gen_reg_rtx (DImode);
+          emit_insn (gen_subdi3 (amount, force_reg (DImode, GEN_INT (64)),
+                                 operands[2]));
+        }
+      else
+        {
+          rtx temp = gen_reg_rtx (SImode);
+          emit_insn (gen_subsi3 (temp, force_reg (SImode, GEN_INT (64)),
+                                 lowpart_subreg (SImode, operands[2],
+                                                 DImode)));
+          amount = gen_reg_rtx (V4SImode);
+          emit_insn (gen_vec_setv4si_0 (amount, CONST0_RTX (V4SImode),
+                                        temp));
+        }
+      amount = lowpart_subreg (DImode, amount, GET_MODE (amount));
+      emit_insn (gen_ashlv4di3 (ashl_res, zero_or_all_ones, amount));
+      emit_insn (gen_iorv4di3 (operands[0], lshr_res, ashl_res));
+      DONE;
+    }
+})
+
 (define_insn "<mask_codefor><insn><mode>3<mask_name>"
   [(set (match_operand:VI248_AVX512BW_2 0 "register_operand" "=v,v")
         (any_lshift:VI248_AVX512BW_2
@@ -20313,11 +20432,13 @@
         (ashiftrt:V2DI
           (match_operand:V2DI 1 "register_operand")
           (match_operand:DI 2 "nonmemory_operand")))]
-  "TARGET_SSE4_2"
+  "TARGET_SSE2"
 {
   if (!TARGET_AVX512VL)
     {
-      if (CONST_INT_P (operands[2]) && INTVAL (operands[2]) == 63)
+      if (TARGET_SSE4_2
+          && CONST_INT_P (operands[2])
+          && UINTVAL (operands[2]) >= 63)
         {
           rtx zero = force_reg (V2DImode, CONST0_RTX (V2DImode));
           emit_insn (gen_sse4_2_gtv2di3 (operands[0], zero, operands[1]));
@@ -20328,6 +20449,65 @@
           emit_move_insn (operands[0], operands[1]);
           DONE;
         }
+      if (CONST_INT_P (operands[2])
+          && (!TARGET_XOP || UINTVAL (operands[2]) >= 63))
+        {
+          vec_perm_builder sel (4, 4, 1);
+          sel.quick_grow (4);
+          rtx arg0, arg1;
+          rtx op1 = lowpart_subreg (V4SImode, operands[1], V2DImode);
+          rtx target = gen_reg_rtx (V4SImode);
+          if (UINTVAL (operands[2]) >= 63)
+            {
+              arg0 = arg1 = gen_reg_rtx (V4SImode);
+              emit_insn (gen_ashrv4si3 (arg0, op1, GEN_INT (31)));
+              sel[0] = 1;
+              sel[1] = 1;
+              sel[2] = 3;
+              sel[3] = 3;
+            }
+          else if (INTVAL (operands[2]) > 32)
+            {
+              arg0 = gen_reg_rtx (V4SImode);
+              arg1 = gen_reg_rtx (V4SImode);
+              emit_insn (gen_ashrv4si3 (arg1, op1, GEN_INT (31)));
+              emit_insn (gen_ashrv4si3 (arg0, op1,
+                                        GEN_INT (INTVAL (operands[2]) - 32)));
+              sel[0] = 1;
+              sel[1] = 5;
+              sel[2] = 3;
+              sel[3] = 7;
+            }
+          else if (INTVAL (operands[2]) == 32)
+            {
+              arg0 = op1;
+              arg1 = gen_reg_rtx (V4SImode);
+              emit_insn (gen_ashrv4si3 (arg1, op1, GEN_INT (31)));
+              sel[0] = 1;
+              sel[1] = 5;
+              sel[2] = 3;
+              sel[3] = 7;
+            }
+          else
+            {
+              arg0 = gen_reg_rtx (V2DImode);
+              arg1 = gen_reg_rtx (V4SImode);
+              emit_insn (gen_lshrv2di3 (arg0, operands[1], operands[2]));
+              emit_insn (gen_ashrv4si3 (arg1, op1, operands[2]));
+              arg0 = lowpart_subreg (V4SImode, arg0, V2DImode);
+              sel[0] = 0;
+              sel[1] = 5;
+              sel[2] = 2;
+              sel[3] = 7;
+            }
+          vec_perm_indices indices (sel, arg0 != arg1 ? 2 : 1, 4);
+          bool ok = targetm.vectorize.vec_perm_const (V4SImode, target,
+                                                      arg0, arg1, indices);
+          gcc_assert (ok);
+          emit_move_insn (operands[0],
+                          lowpart_subreg (V2DImode, target, V4SImode));
+          DONE;
+        }
       if (!TARGET_XOP)
         {
           rtx zero = force_reg (V2DImode, CONST0_RTX (V2DImode));
@@ -20337,9 +20517,7 @@
           emit_insn (gen_lshrv2di3 (lshr_res, operands[1], operands[2]));
           rtx ashl_res = gen_reg_rtx (V2DImode);
           rtx amount;
-          if (CONST_INT_P (operands[2]))
-            amount = GEN_INT (64 - INTVAL (operands[2]));
-          else if (TARGET_64BIT)
+          if (TARGET_64BIT)
            {
              amount = gen_reg_rtx (DImode);
              emit_insn (gen_subdi3 (amount, force_reg (DImode, GEN_INT (64)),
@@ -20355,8 +20533,7 @@
              emit_insn (gen_vec_setv4si_0 (amount, CONST0_RTX (V4SImode),
                                            temp));
            }
-          if (!CONST_INT_P (operands[2]))
-            amount = lowpart_subreg (DImode, amount, GET_MODE (amount));
+          amount = lowpart_subreg (DImode, amount, GET_MODE (amount));
           emit_insn (gen_ashlv2di3 (ashl_res, zero_or_all_ones, amount));
           emit_insn (gen_iorv2di3 (operands[0], lshr_res, ashl_res));
           DONE;
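
For readers less familiar with the i386 backend, here is a minimal C sketch
(not part of the patch) of the two strategies the expanders above emit,
written with x86 intrinsics instead of RTL.  The function names ashr64_const
and ashr64_var are made up for illustration; the constant-shift variant uses
an SSE4.1 pblendw for the interleave where the patch lets
targetm.vectorize.vec_perm_const pick the permutation sequence, and the
variable-shift variant uses the SSE4.2 pcmpgtq compare that corresponds to
gen_sse4_2_gtv2di3.

/* Rough sketch only -- assumes compilation with -msse4.2.  */
#include <immintrin.h>

/* Constant shift, 0 < n < 32: a 64-bit logical shift supplies the low
   32-bit halves, a 32-bit arithmetic shift supplies the high halves,
   and a permutation interleaves them (sel = {0,5,2,7} in the patch).  */
static inline __m128i
ashr64_const (__m128i x, const int n)
{
  __m128i lo = _mm_srli_epi64 (x, n);   /* bits n..63 of each lane, zero-filled */
  __m128i hi = _mm_srai_epi32 (x, n);   /* sign-filled shift of each 32-bit half */
  /* Take 32-bit elements 0 and 2 from lo, 1 and 3 from hi.  */
  return _mm_blend_epi16 (lo, hi, 0xCC);
}

/* Variable shift, 0 <= n <= 63: logical shift right, then OR in the sign
   bits shifted left by 64-n (psllq by a count >= 64 yields zero, so n == 0
   also comes out right).  */
static inline __m128i
ashr64_var (__m128i x, unsigned int n)
{
  __m128i sign = _mm_cmpgt_epi64 (_mm_setzero_si128 (), x); /* 0 > x ? -1 : 0 */
  __m128i lo = _mm_srl_epi64 (x, _mm_cvtsi32_si128 (n));
  __m128i hi = _mm_sll_epi64 (sign, _mm_cvtsi32_si128 (64 - n));
  return _mm_or_si128 (lo, hi);
}

On a plain SSE2 target the interleave would need a shufps/pshufd pair instead
of the blend, and the sign mask can be built from a 32-bit arithmetic shift of
the odd words followed by a shuffle, which is where better V4SImode/V8SImode
permutation expansion pays off.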