https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98856
--- Comment #12 from Jakub Jelinek <jakub at gcc dot gnu.org> ---
V4DImode arithmetic right shifts would be (untested):

--- gcc/config/i386/sse.md.jj	2021-02-05 14:32:44.175463716 +0100
+++ gcc/config/i386/sse.md	2021-02-05 15:24:37.942026401 +0100
@@ -12458,7 +12458,7 @@
    (set_attr "prefix" "orig,vex")
    (set_attr "mode" "<sseinsnmode>")])
 
-(define_insn "ashr<mode>3<mask_name>"
+(define_insn "<mask_codefor>ashr<mode>3<mask_name>"
   [(set (match_operand:VI248_AVX512BW_AVX512VL 0 "register_operand" "=v,v")
	(ashiftrt:VI248_AVX512BW_AVX512VL
	  (match_operand:VI248_AVX512BW_AVX512VL 1 "nonimmediate_operand" "v,vm")
@@ -12472,6 +12472,67 @@
	    (const_string "0")))
    (set_attr "mode" "<sseinsnmode>")])
 
+(define_expand "ashr<mode>3"
+  [(set (match_operand:VI248_AVX512BW 0 "register_operand")
+	(ashiftrt:VI248_AVX512BW
+	  (match_operand:VI248_AVX512BW 1 "nonimmediate_operand")
+	  (match_operand:DI 2 "nonmemory_operand")))]
+  "TARGET_AVX512F")
+
+(define_expand "ashrv4di3"
+  [(set (match_operand:V4DI 0 "register_operand")
+	(ashiftrt:V4DI
+	  (match_operand:V4DI 1 "nonimmediate_operand")
+	  (match_operand:DI 2 "nonmemory_operand")))]
+  "TARGET_AVX2"
+{
+  if (!TARGET_AVX512VL)
+    {
+      if (CONST_INT_P (operands[2]) && INTVAL (operands[2]) == 63)
+	{
+	  rtx zero = force_reg (V4DImode, CONST0_RTX (V4DImode));
+	  emit_insn (gen_avx2_gtv4di3 (operands[0], zero, operands[1]));
+	  DONE;
+	}
+      if (operands[2] == const0_rtx)
+	{
+	  emit_move_insn (operands[0], operands[1]);
+	  DONE;
+	}
+
+      rtx zero = force_reg (V4DImode, CONST0_RTX (V4DImode));
+      rtx zero_or_all_ones = gen_reg_rtx (V4DImode);
+      emit_insn (gen_avx2_gtv4di3 (zero_or_all_ones, zero, operands[1]));
+      rtx lshr_res = gen_reg_rtx (V4DImode);
+      emit_insn (gen_lshrv4di3 (lshr_res, operands[1], operands[2]));
+      rtx ashl_res = gen_reg_rtx (V4DImode);
+      rtx amount;
+      if (CONST_INT_P (operands[2]))
+	amount = GEN_INT (64 - INTVAL (operands[2]));
+      else if (TARGET_64BIT)
+	{
+	  amount = gen_reg_rtx (DImode);
+	  emit_insn (gen_subdi3 (amount, force_reg (DImode, GEN_INT (64)),
+				 operands[2]));
+	}
+      else
+	{
+	  rtx temp = gen_reg_rtx (SImode);
+	  emit_insn (gen_subsi3 (temp, force_reg (SImode, GEN_INT (64)),
+				 lowpart_subreg (SImode, operands[2],
+						 DImode)));
+	  amount = gen_reg_rtx (V4SImode);
+	  emit_insn (gen_vec_setv4si_0 (amount, CONST0_RTX (V4SImode),
+					temp));
+	}
+      if (!CONST_INT_P (operands[2]))
+	amount = lowpart_subreg (DImode, amount, GET_MODE (amount));
+      emit_insn (gen_ashlv4di3 (ashl_res, zero_or_all_ones, amount));
+      emit_insn (gen_iorv4di3 (operands[0], lshr_res, ashl_res));
+      DONE;
+    }
+})
+
 (define_insn "<mask_codefor><insn><mode>3<mask_name>"
   [(set (match_operand:VI248_AVX512BW_2 0 "register_operand" "=v,v")
	(any_lshift:VI248_AVX512BW_2

Trying 3 different routines, one returning >> 63 of a V4DImode vector,
another one >> 17 and another one >> var, the differences with -mavx2 are:

-	vextracti128	$0x1, %ymm0, %xmm1
-	vmovq	%xmm0, %rax
-	vpextrq	$1, %xmm0, %rcx
-	cqto
-	vmovq	%xmm1, %rax
-	sarq	$63, %rcx
-	sarq	$63, %rax
-	vmovq	%rdx, %xmm3
-	movq	%rax, %rsi
-	vpextrq	$1, %xmm1, %rax
-	vpinsrq	$1, %rcx, %xmm3, %xmm0
-	sarq	$63, %rax
-	vmovq	%rsi, %xmm2
-	vpinsrq	$1, %rax, %xmm2, %xmm1
-	vinserti128	$0x1, %xmm1, %ymm0, %ymm0
+	vmovdqa	%ymm0, %ymm1
+	vpxor	%xmm0, %xmm0, %xmm0
+	vpcmpgtq	%ymm1, %ymm0, %ymm0

-	vmovq	%xmm0, %rax
-	vextracti128	$0x1, %ymm0, %xmm1
-	vpextrq	$1, %xmm0, %rcx
-	sarq	$17, %rax
-	sarq	$17, %rcx
-	movq	%rax, %rdx
-	vmovq	%xmm1, %rax
-	sarq	$17, %rax
-	vmovq	%rdx, %xmm3
-	movq	%rax, %rsi
-	vpextrq	$1, %xmm1, %rax
-	vpinsrq	$1, %rcx, %xmm3, %xmm0
-	sarq	$17, %rax
-	vmovq	%rsi, %xmm2
-	vpinsrq	$1, %rax, %xmm2, %xmm1
-	vinserti128	$0x1, %xmm1, %ymm0, %ymm0
+	vpxor	%xmm1, %xmm1, %xmm1
+	vpcmpgtq	%ymm0, %ymm1, %ymm1
+	vpsrlq	$17, %ymm0, %ymm0
+	vpsllq	$47, %ymm1, %ymm1
+	vpor	%ymm1, %ymm0, %ymm0

and

-	movl	%edi, %ecx
-	vmovq	%xmm0, %rax
-	vextracti128	$0x1, %ymm0, %xmm1
-	sarq	%cl, %rax
-	vpextrq	$1, %xmm0, %rsi
-	movq	%rax, %rdx
-	vmovq	%xmm1, %rax
-	sarq	%cl, %rsi
-	sarq	%cl, %rax
-	vmovq	%rdx, %xmm3
-	movq	%rax, %rdi
-	vpextrq	$1, %xmm1, %rax
-	vpinsrq	$1, %rsi, %xmm3, %xmm0
-	sarq	%cl, %rax
+	vpxor	%xmm1, %xmm1, %xmm1
+	movslq	%edi, %rdi
+	movl	$64, %eax
+	vpcmpgtq	%ymm0, %ymm1, %ymm1
+	subq	%rdi, %rax
 	vmovq	%rdi, %xmm2
-	vpinsrq	$1, %rax, %xmm2, %xmm1
-	vinserti128	$0x1, %xmm1, %ymm0, %ymm0
+	vmovq	%rax, %xmm3
+	vpsrlq	%xmm2, %ymm0, %ymm0
+	vpsllq	%xmm3, %ymm1, %ymm1
+	vpor	%ymm1, %ymm0, %ymm0

so at least size-wise much smaller.