Author: Simon Pilgrim
Date: 2020-12-16T11:24:40Z
New Revision: e55f7de946b1be546b05ce7399ec4f8fc7980cf5
URL: https://github.com/llvm/llvm-project/commit/e55f7de946b1be546b05ce7399ec4f8fc7980cf5
DIFF: https://github.com/llvm/llvm-project/commit/e55f7de946b1be546b05ce7399ec4f8fc7980cf5.diff

LOG: [X86][SSE] combineReductionToHorizontal - don't rely on widenSubVector to handle illegal vector types.

Thanks to @asbirlea for reporting the bug.

Added: 

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/vector-reduce-mul.ll

Removed: 

################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 40717e85e27f..7be12238956e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -40123,7 +40123,11 @@ static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
         Rdx = DAG.getNode(Opc, DL, Lo.getValueType(), Lo, Hi);
       }
     } else {
-      Rdx = widenSubVector(Rdx, false, Subtarget, DAG, DL, 128);
+      if (VecVT == MVT::v4i8)
+        Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, Rdx,
+                          DAG.getUNDEF(MVT::v4i8));
+      Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Rdx,
+                        DAG.getUNDEF(MVT::v8i8));
       Rdx = getUnpackl(DAG, DL, MVT::v16i8, Rdx, DAG.getUNDEF(MVT::v16i8));
       Rdx = DAG.getBitcast(MVT::v8i16, Rdx);
     }

diff --git a/llvm/test/CodeGen/X86/vector-reduce-mul.ll b/llvm/test/CodeGen/X86/vector-reduce-mul.ll
index e284f9421ef5..5484eeeff45a 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-mul.ll
@@ -2236,6 +2236,114 @@ define i8 @test_v128i8(<128 x i8> %a0) {
   ret i8 %1
 }
 
+;
+; Legalization
+;
+
+define i8 @illegal_v4i8(i8 %a0, <4 x i8>* %a1) {
+; SSE2-LABEL: illegal_v4i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movl %edi, %eax
+; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; SSE2-NEXT:    pmullw %xmm0, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    psrld $16, %xmm0
+; SSE2-NEXT:    pmullw %xmm1, %xmm0
+; SSE2-NEXT:    movd %xmm0, %ecx
+; SSE2-NEXT:    # kill: def $al killed $al killed $eax
+; SSE2-NEXT:    mulb %cl
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: illegal_v4i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movl %edi, %eax
+; SSE41-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; SSE41-NEXT:    pmullw %xmm0, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrld $16, %xmm0
+; SSE41-NEXT:    pmullw %xmm1, %xmm0
+; SSE41-NEXT:    movd %xmm0, %ecx
+; SSE41-NEXT:    # kill: def $al killed $al killed $eax
+; SSE41-NEXT:    mulb %cl
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: illegal_v4i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsrld $16, %xmm0, %xmm1
+; AVX-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovd %xmm0, %ecx
+; AVX-NEXT:    # kill: def $al killed $al killed $eax
+; AVX-NEXT:    mulb %cl
+; AVX-NEXT:    retq
+  %ld = load <4 x i8>, <4 x i8>* %a1, align 4
+  %rdx = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> %ld)
+  %mul = mul i8 %a0, %rdx
+  ret i8 %mul
+}
+
+define i8 @illegal_v8i8(i8 %a0, <8 x i8>* %a1) {
+; SSE2-LABEL: illegal_v8i8:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movl %edi, %eax
+; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT:    pmullw %xmm0, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
+; SSE2-NEXT:    pmullw %xmm1, %xmm0
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psrld $16, %xmm1
+; SSE2-NEXT:    pmullw %xmm0, %xmm1
+; SSE2-NEXT:    movd %xmm1, %ecx
+; SSE2-NEXT:    # kill: def $al killed $al killed $eax
+; SSE2-NEXT:    mulb %cl
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: illegal_v8i8:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movl %edi, %eax
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE41-NEXT:    pmullw %xmm0, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
+; SSE41-NEXT:    pmullw %xmm1, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    psrld $16, %xmm1
+; SSE41-NEXT:    pmullw %xmm0, %xmm1
+; SSE41-NEXT:    movd %xmm1, %ecx
+; SSE41-NEXT:    # kill: def $al killed $al killed $eax
+; SSE41-NEXT:    mulb %cl
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: illegal_v8i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsrld $16, %xmm0, %xmm1
+; AVX-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovd %xmm0, %ecx
+; AVX-NEXT:    # kill: def $al killed $al killed $eax
+; AVX-NEXT:    mulb %cl
+; AVX-NEXT:    retq
+  %ld = load <8 x i8>, <8 x i8>* %a1, align 4
+  %rdx = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> %ld)
+  %mul = mul i8 %a0, %rdx
+  ret i8 %mul
+}
+
 declare i64 @llvm.vector.reduce.mul.v2i64(<2 x i64>)
 declare i64 @llvm.vector.reduce.mul.v4i64(<4 x i64>)
 declare i64 @llvm.vector.reduce.mul.v8i64(<8 x i64>)

_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
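
P.S. A standalone reproducer for the v4i8 case covered above would look roughly like the sketch below; the RUN line and function name are illustrative, not part of the commit (the committed coverage lives in vector-reduce-mul.ll as shown in the diff):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown
; <4 x i8> is not a legal x86 vector type, so combineReductionToHorizontal
; must widen the operand to v16i8 (now v4i8 -> v8i8 -> v16i8 in two
; CONCAT_VECTORS steps) before forming the pmullw-based reduction.
define i8 @reduce_mul_v4i8(<4 x i8>* %p) {
  %v = load <4 x i8>, <4 x i8>* %p, align 4
  %r = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> %v)
  ret i8 %r
}
declare i8 @llvm.vector.reduce.mul.v4i8(<4 x i8>)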