https://github.com/bjope updated https://github.com/llvm/llvm-project/pull/143103
From f062333a77f3c9382363daea4f025463762a58f7 Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.petters...@ericsson.com>
Date: Tue, 3 Jun 2025 10:01:01 +0200
Subject: [PATCH] [SelectionDAG] Deal with POISON for
 INSERT_VECTOR_ELT/INSERT_SUBVECTOR (part 2)

Add support in isGuaranteedNotToBeUndefOrPoison to avoid regressions
seen after a previous commit fixing #141034.
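For context when reading the test diffs below, the pattern of interest is an
insertelement chain that leaves some lanes undefined. A hypothetical
reduction, modeled on the build_v4i16_012u test updated further down (not IR
taken verbatim from #141034):

  %1 = insertelement <4 x i16> undef, i16 %a0, i32 0
  %2 = insertelement <4 x i16> %1, i16 %a1, i32 1
  %3 = insertelement <4 x i16> %2, i16 %a2, i32 2
  ; lane 3 is never written (the "u" in "012u"), so a combine only needs
  ; isGuaranteedNotToBeUndefOrPoison to hold for lanes 0, 1 and 2.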
---
 llvm/include/llvm/CodeGen/SelectionDAGNodes.h |   6 +
 .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp |  70 +++++
 llvm/test/CodeGen/Thumb2/mve-vld3.ll          |   4 +-
 .../X86/merge-consecutive-loads-128.ll        |  78 ++----
 llvm/test/CodeGen/X86/mmx-build-vector.ll     | 255 +++++-------------
 llvm/test/CodeGen/X86/pr62286.ll              |  14 +-
 .../CodeGen/X86/vector-shuffle-combining.ll   |  21 +-
 llvm/test/CodeGen/X86/vector-trunc.ll         | 140 ++--------
 .../zero_extend_vector_inreg_of_broadcast.ll  |   3 +-
 9 files changed, 205 insertions(+), 386 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 92da4ef7f0556..587e8fb5a1d24 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1881,6 +1881,12 @@ LLVM_ABI SDValue peekThroughExtractSubvectors(SDValue V);
 /// If \p V is not a truncation, it is returned as-is.
 LLVM_ABI SDValue peekThroughTruncates(SDValue V);
 
+/// Recursively peek through INSERT_VECTOR_ELT nodes, returning the source
+/// vector operand of \p V, as long as \p V is an INSERT_VECTOR_ELT operation
+/// that does not insert into any of the demanded vector elts.
+LLVM_ABI SDValue peekThroughInsertVectorElt(SDValue V,
+                                            const APInt &DemandedElts);
+
 /// Returns true if \p V is a bitwise not operation. Assumes that an all ones
 /// constant is canonicalized to be operand 1.
 LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
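The helper declared above is given a body later in this patch and is called
from isGuaranteedNotToBeUndefOrPoison. A rough sketch of the intended
semantics, with hypothetical node names (not code from the patch):

  // Suppose V was built as (v4 vector, t0 arbitrary):
  //   t1 = insert_vector_elt t0, a, 0
  //   t2 = insert_vector_elt t1, b, 1   // V == t2
  // and only element 2 is demanded:
  APInt DemandedElts = APInt::getOneBitSet(4, 2); // 0b0100
  // Neither insert writes a demanded lane, so
  // peekThroughInsertVectorElt(t2, DemandedElts) walks past both inserts
  // and returns t0.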
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 3d4e4cd355da4..79f60167cec24 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5454,6 +5454,59 @@ bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op,
     }
     return true;
 
+  case ISD::INSERT_SUBVECTOR: {
+    if (Op.getValueType().isScalableVector())
+      break;
+    SDValue Src = Op.getOperand(0);
+    SDValue Sub = Op.getOperand(1);
+    uint64_t Idx = Op.getConstantOperandVal(2);
+    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
+    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
+    APInt DemandedSrcElts = DemandedElts;
+    DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
+
+    if (!!DemandedSubElts && !isGuaranteedNotToBeUndefOrPoison(
+                                 Sub, DemandedSubElts, PoisonOnly, Depth + 1))
+      return false;
+    if (!!DemandedSrcElts && !isGuaranteedNotToBeUndefOrPoison(
+                                 Src, DemandedSrcElts, PoisonOnly, Depth + 1))
+      return false;
+    return true;
+  }
+
+  case ISD::INSERT_VECTOR_ELT: {
+    SDValue InVec = Op.getOperand(0);
+    SDValue InVal = Op.getOperand(1);
+    SDValue EltNo = Op.getOperand(2);
+    EVT VT = InVec.getValueType();
+    auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
+    if (IndexC && VT.isFixedLengthVector() &&
+        IndexC->getZExtValue() < VT.getVectorNumElements()) {
+      if (DemandedElts[IndexC->getZExtValue()] &&
+          !isGuaranteedNotToBeUndefOrPoison(InVal, PoisonOnly, Depth + 1))
+        return false;
+      APInt InVecDemandedElts = DemandedElts;
+      InVecDemandedElts.clearBit(IndexC->getZExtValue());
+      if (!!InVecDemandedElts &&
+          !isGuaranteedNotToBeUndefOrPoison(
+              peekThroughInsertVectorElt(InVec, InVecDemandedElts),
+              InVecDemandedElts, PoisonOnly, Depth + 1))
+        return false;
+      return true;
+    }
+    break;
+  }
+
+  case ISD::SCALAR_TO_VECTOR:
+    // If only demanding upper (undef) elements.
+    if (DemandedElts.ugt(1))
+      return PoisonOnly;
+    // If only demanding element 0, or only considering poison.
+    if (PoisonOnly || DemandedElts == 0)
+      return isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), PoisonOnly,
+                                              Depth + 1);
+    return false;
+
   case ISD::SPLAT_VECTOR:
     return isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), PoisonOnly,
                                             Depth + 1);
@@ -12476,6 +12529,23 @@ SDValue llvm::peekThroughTruncates(SDValue V) {
   return V;
 }
 
+SDValue llvm::peekThroughInsertVectorElt(SDValue V, const APInt &DemandedElts) {
+  while (V.getOpcode() == ISD::INSERT_VECTOR_ELT) {
+    SDValue InVec = V.getOperand(0);
+    SDValue EltNo = V.getOperand(2);
+    EVT VT = InVec.getValueType();
+    auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
+    if (IndexC && VT.isFixedLengthVector() &&
+        IndexC->getZExtValue() < VT.getVectorNumElements() &&
+        !DemandedElts[IndexC->getZExtValue()]) {
+      V = InVec;
+      continue;
+    }
+    break;
+  }
+  return V;
+}
+
 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
   if (V.getOpcode() != ISD::XOR)
     return false;
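To make the demanded-elements bookkeeping in the new INSERT_SUBVECTOR case
concrete, here is a small self-contained sketch of the same APInt arithmetic
(hypothetical widths and index; it mirrors the code above but is not part of
the patch):

  #include "llvm/ADT/APInt.h"
  #include <cassert>
  using llvm::APInt;

  int main() {
    // v8 destination, v2 subvector inserted at element index 2,
    // with all 8 destination elements demanded.
    APInt DemandedElts = APInt::getAllOnes(8);
    unsigned Idx = 2, NumSubElts = 2;
    // Lanes Idx..Idx+NumSubElts-1 are taken from the subvector...
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    // ...and every other lane still comes from the source vector.
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
    assert(DemandedSubElts == APInt::getAllOnes(2));
    assert(DemandedSrcElts == APInt(8, 0xF3)); // 0b11110011
    return 0;
  }

Each operand is then checked only for its own demanded lanes, so an undef
lane that is wholly overwritten by the insert no longer blocks the proof.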
diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
index 38e42c137e3a9..4dd9173e2d418 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
@@ -663,8 +663,8 @@ define void @vld3_v2i8(ptr %src, ptr %dst) {
 ; CHECK: @ %bb.0: @ %entry
 ; CHECK-NEXT: .pad #8
 ; CHECK-NEXT: sub sp, #8
-; CHECK-NEXT: ldrd r2, r0, [r0]
-; CHECK-NEXT: strd r2, r0, [sp]
+; CHECK-NEXT: ldrd r0, r2, [r0]
+; CHECK-NEXT: strd r0, r2, [sp]
 ; CHECK-NEXT: mov r0, sp
 ; CHECK-NEXT: vldrb.u16 q0, [r0]
 ; CHECK-NEXT: vmov.u16 r0, q0[4]
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
index 1df4e9f47f21b..595f8491b405c 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
@@ -262,54 +262,37 @@ define <4 x float> @merge_4f32_f32_45zz(ptr %ptr) nounwind uwtable noinline ssp
 define <4 x float> @merge_4f32_f32_012u(ptr %ptr) nounwind uwtable noinline ssp {
 ; SSE2-LABEL: merge_4f32_f32_012u:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: merge_4f32_f32_012u:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE41-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE41-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: merge_4f32_f32_012u:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX-NEXT: retq
 ;
 ; X86-SSE1-LABEL: merge_4f32_f32_012u:
 ; X86-SSE1: # %bb.0:
 ; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
 ; X86-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; X86-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE1-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X86-SSE1-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X86-SSE1-NEXT: retl
 ;
 ; X86-SSE41-LABEL: merge_4f32_f32_012u:
 ; X86-SSE41: # %bb.0:
 ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; X86-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; X86-SSE41-NEXT: retl
  %ptr1 = getelementptr inbounds float, ptr %ptr, i64 1
  %ptr2 = getelementptr inbounds float, ptr %ptr, i64 2
@@ -326,54 +309,37 @@ define <4 x float> @merge_4f32_f32_012u(ptr %ptr) nounwind uwtable noinline ssp
 define <4 x float> @merge_4f32_f32_019u(ptr %ptr) nounwind uwtable noinline ssp {
 ; SSE2-LABEL: merge_4f32_f32_019u:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: merge_4f32_f32_019u:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE41-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE41-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: merge_4f32_f32_019u:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX-NEXT: retq
 ;
 ; X86-SSE1-LABEL: merge_4f32_f32_019u:
 ; X86-SSE1: # %bb.0:
 ; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
 ; X86-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; X86-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE1-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X86-SSE1-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X86-SSE1-NEXT: retl
 ;
 ; X86-SSE41-LABEL: merge_4f32_f32_019u:
 ; X86-SSE41: # %bb.0:
 ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; X86-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; X86-SSE41-NEXT: retl
  %ptr1 = getelementptr inbounds float, ptr %ptr, i64 1
  %ptr2 = getelementptr inbounds float, ptr %ptr, i64 9
diff --git a/llvm/test/CodeGen/X86/mmx-build-vector.ll b/llvm/test/CodeGen/X86/mmx-build-vector.ll
index 6b1bedc1c5982..d8a010bacc683 100644
--- a/llvm/test/CodeGen/X86/mmx-build-vector.ll
+++ b/llvm/test/CodeGen/X86/mmx-build-vector.ll
@@ -2,11 +2,11 @@
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx | FileCheck %s --check-prefixes=X86,X86-MMX
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefixes=X86,X86-SSE
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+ssse3 | FileCheck %s --check-prefixes=X86,X86-SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefixes=X64,X64-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+ssse3 | FileCheck %s --check-prefixes=X64,X64-SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx | FileCheck %s --check-prefixes=X64,X64-AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx2 | FileCheck %s --check-prefixes=X64,X64-AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx512f | FileCheck %s --check-prefixes=X64,X64-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+ssse3 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx512f | FileCheck %s --check-prefix=X64
 
 declare <1 x i64> @llvm.x86.mmx.padd.d(<1 x i64>, <1 x i64>)
 
@@ -290,21 +290,15 @@ define void @build_v4i16_0zuz(ptr%p0, i16 %a0, i16 %a1, i16 %a2, i16 %a3) nounwi
 define void @build_v4i16_012u(ptr%p0, i16 %a0, i16 %a1, i16 %a2, i16 %a3) nounwind {
 ; X86-LABEL: build_v4i16_012u:
 ; X86: # %bb.0:
-; X86-NEXT: pushl %esi
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movd %eax, %mm0
-; X86-NEXT: movd %esi, %mm1
-; X86-NEXT: punpcklwd %mm0, %mm1 # mm1 = mm1[0],mm0[0],mm1[1],mm0[1]
-; X86-NEXT: movd %edx, %mm0
-; X86-NEXT: movd %ecx, %mm2
-; X86-NEXT: punpcklwd %mm0, %mm2 # mm2 = mm2[0],mm0[0],mm2[1],mm0[1]
-; X86-NEXT: punpckldq %mm1, %mm2 # mm2 = mm2[0],mm1[0]
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm0
+; X86-NEXT: punpcklwd %mm0, %mm0 # mm0 = mm0[0,0,1,1]
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm1
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm2
+; X86-NEXT: punpcklwd %mm1, %mm2 # mm2 = mm2[0],mm1[0],mm2[1],mm1[1]
+; X86-NEXT: punpckldq %mm0, %mm2 # mm2 = mm2[0],mm0[0]
 ; X86-NEXT: paddd %mm2, %mm2
 ; X86-NEXT: movq %mm2, (%eax)
-; X86-NEXT: popl %esi
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: build_v4i16_012u:
@@ -481,126 +475,45 @@ define void @build_v8i8_0u2345z7(ptr%p0, i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
   ret void
 }
 
-
-; Recursion depth limit in isGuaranteedNotToBeUndefOrPoison prevents llc from
-; detecting that we insert an "undef" element in a position that already is
-; undef. OTOH, opt would optimize away that insertelement operation from the
-; IR, so maybe that isn't a problem in reality.
 define void @build_v8i8_0123zzzu(ptr%p0, i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7) nounwind {
-; X86-MMX-LABEL: build_v8i8_0123zzzu:
-; X86-MMX: # %bb.0:
-; X86-MMX-NEXT: pushl %ebp
-; X86-MMX-NEXT: movl %esp, %ebp
-; X86-MMX-NEXT: pushl %esi
-; X86-MMX-NEXT: andl $-8, %esp
-; X86-MMX-NEXT: subl $16, %esp
-; X86-MMX-NEXT: movl 8(%ebp), %eax
-; X86-MMX-NEXT: movzbl 20(%ebp), %edx
-; X86-MMX-NEXT: movzbl 24(%ebp), %ecx
-; X86-MMX-NEXT: shll $8, %ecx
-; X86-MMX-NEXT: orl %edx, %ecx
-; X86-MMX-NEXT: shll $16, %ecx
-; X86-MMX-NEXT: movzbl 12(%ebp), %edx
-; X86-MMX-NEXT: movzbl 16(%ebp), %esi
-; X86-MMX-NEXT: shll $8, %esi
-; X86-MMX-NEXT: orl %edx, %esi
-; X86-MMX-NEXT: movzwl %si, %edx
-; X86-MMX-NEXT: orl %ecx, %edx
-; X86-MMX-NEXT: movzbl %al, %ecx
-; X86-MMX-NEXT: shll $24, %ecx
-; X86-MMX-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-MMX-NEXT: movl %edx, (%esp)
-; X86-MMX-NEXT: movq (%esp), %mm0
-; X86-MMX-NEXT: paddd %mm0, %mm0
-; X86-MMX-NEXT: movq %mm0, (%eax)
-; X86-MMX-NEXT: leal -4(%ebp), %esp
-; X86-MMX-NEXT: popl %esi
-; X86-MMX-NEXT: popl %ebp
-; X86-MMX-NEXT: retl
-;
-; X86-SSE-LABEL: build_v8i8_0123zzzu:
-; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %edx
-; X86-SSE-NEXT: shll $8, %edx
-; X86-SSE-NEXT: orl %ecx, %edx
-; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-SSE-NEXT: shll $16, %ecx
-; X86-SSE-NEXT: orl %edx, %ecx
-; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %edx
-; X86-SSE-NEXT: shll $24, %edx
-; X86-SSE-NEXT: orl %ecx, %edx
-; X86-SSE-NEXT: movd %edx, %xmm0
-; X86-SSE-NEXT: movdq2q %xmm0, %mm0
-; X86-SSE-NEXT: paddd %mm0, %mm0
-; X86-SSE-NEXT: movq %mm0, (%eax)
-; X86-SSE-NEXT: retl
+; X86-LABEL: build_v8i8_0123zzzu:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm0
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm1
+; X86-NEXT: punpcklbw %mm0, %mm1 # mm1 = mm1[0],mm0[0],mm1[1],mm0[1],mm1[2],mm0[2],mm1[3],mm0[3]
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm0
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm2
+; X86-NEXT: punpcklbw %mm0, %mm2 # mm2 = mm2[0],mm0[0],mm2[1],mm0[1],mm2[2],mm0[2],mm2[3],mm0[3]
+; X86-NEXT: punpcklwd %mm1, %mm2 # mm2 = mm2[0],mm1[0],mm2[1],mm1[1]
+; X86-NEXT: pxor %mm0, %mm0
+; X86-NEXT: pxor %mm1, %mm1
+; X86-NEXT: punpcklbw %mm1, %mm1 # mm1 = mm1[0,0,1,1,2,2,3,3]
+; X86-NEXT: punpcklbw %mm0, %mm0 # mm0 = mm0[0,0,1,1,2,2,3,3]
+; X86-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1]
+; X86-NEXT: punpckldq %mm0, %mm2 # mm2 = mm2[0],mm0[0]
+; X86-NEXT: paddd %mm2, %mm2
+; X86-NEXT: movq %mm2, (%eax)
+; X86-NEXT: retl
 ;
-; X64-SSE2-LABEL: build_v8i8_0123zzzu:
-; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: movd %esi, %xmm0
-; X64-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; X64-SSE2-NEXT: pand %xmm1, %xmm2
-; X64-SSE2-NEXT: pandn %xmm0, %xmm1
-; X64-SSE2-NEXT: por %xmm2, %xmm1
-; X64-SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; X64-SSE2-NEXT: pand %xmm0, %xmm1
-; X64-SSE2-NEXT: movd %edx, %xmm2
-; X64-SSE2-NEXT: psllw $8, %xmm2
-; X64-SSE2-NEXT: pandn %xmm2, %xmm0
-; X64-SSE2-NEXT: por %xmm1, %xmm0
-; X64-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; X64-SSE2-NEXT: pand %xmm1, %xmm0
-; X64-SSE2-NEXT: movd %ecx, %xmm2
-; X64-SSE2-NEXT: pslld $16, %xmm2
-; X64-SSE2-NEXT: pandn %xmm2, %xmm1
-; X64-SSE2-NEXT: por %xmm0, %xmm1
-; X64-SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255]
-; X64-SSE2-NEXT: pand %xmm0, %xmm1
-; X64-SSE2-NEXT: movd %r8d, %xmm2
-; X64-SSE2-NEXT: pslld $24, %xmm2
-; X64-SSE2-NEXT: pandn %xmm2, %xmm0
-; X64-SSE2-NEXT: por %xmm1, %xmm0
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-SSE2-NEXT: movdq2q %xmm0, %mm0
-; X64-SSE2-NEXT: paddd %mm0, %mm0
-; X64-SSE2-NEXT: movq %mm0, (%rdi)
-; X64-SSE2-NEXT: retq
-;
-; X64-SSSE3-LABEL: build_v8i8_0123zzzu:
-; X64-SSSE3: # %bb.0:
-; X64-SSSE3-NEXT: movd %esi, %xmm0
-; X64-SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm0[0,1,2,3]
-; X64-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[u,u,u,u,u,7,u,u,u,u,u,u,u,u]
-; X64-SSSE3-NEXT: movd %edx, %xmm1
-; X64-SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,xmm1[0,u,u,u,u,u],zero,xmm1[u,u,u,u,u,u,u,u]
-; X64-SSSE3-NEXT: por %xmm0, %xmm1
-; X64-SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1],zero,xmm1[u,u,u,u,7,u,u,u,u,u,u,u,u]
-; X64-SSSE3-NEXT: movd %ecx, %xmm0
-; X64-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,xmm0[0,u,u,u,u],zero,xmm0[u,u,u,u,u,u,u,u]
-; X64-SSSE3-NEXT: por %xmm1, %xmm0
-; X64-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2],zero,xmm0[u,u,u,7,u,u,u,u,u,u,u,u]
-; X64-SSSE3-NEXT: movd %r8d, %xmm1
-; X64-SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[0,u,u,u],zero,xmm1[u,u,u,u,u,u,u,u]
-; X64-SSSE3-NEXT: por %xmm0, %xmm1
-; X64-SSSE3-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; X64-SSSE3-NEXT: movdq2q %xmm1, %mm0
-; X64-SSSE3-NEXT: paddd %mm0, %mm0
-; X64-SSSE3-NEXT: movq %mm0, (%rdi)
-; X64-SSSE3-NEXT: retq
-;
-; X64-AVX-LABEL: build_v8i8_0123zzzu:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpinsrb $0, %esi, %xmm0, %xmm0
-; X64-AVX-NEXT: vpinsrb $1, %edx, %xmm0, %xmm0
-; X64-AVX-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; X64-AVX-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
-; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX-NEXT: movdq2q %xmm0, %mm0
-; X64-AVX-NEXT: paddd %mm0, %mm0
-; X64-AVX-NEXT: movq %mm0, (%rdi)
-; X64-AVX-NEXT: retq
+; X64-LABEL: build_v8i8_0123zzzu:
+; X64: # %bb.0:
+; X64-NEXT: movd %r8d, %mm0
+; X64-NEXT: movd %ecx, %mm1
+; X64-NEXT: punpcklbw %mm0, %mm1 # mm1 = mm1[0],mm0[0],mm1[1],mm0[1],mm1[2],mm0[2],mm1[3],mm0[3]
+; X64-NEXT: movd %edx, %mm0
+; X64-NEXT: movd %esi, %mm2
+; X64-NEXT: punpcklbw %mm0, %mm2 # mm2 = mm2[0],mm0[0],mm2[1],mm0[1],mm2[2],mm0[2],mm2[3],mm0[3]
+; X64-NEXT: punpcklwd %mm1, %mm2 # mm2 = mm2[0],mm1[0],mm2[1],mm1[1]
+; X64-NEXT: pxor %mm0, %mm0
+; X64-NEXT: pxor %mm1, %mm1
+; X64-NEXT: punpcklbw %mm1, %mm1 # mm1 = mm1[0,0,1,1,2,2,3,3]
+; X64-NEXT: punpcklbw %mm0, %mm0 # mm0 = mm0[0,0,1,1,2,2,3,3]
+; X64-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1]
+; X64-NEXT: punpckldq %mm0, %mm2 # mm2 = mm2[0],mm0[0]
+; X64-NEXT: paddd %mm2, %mm2
+; X64-NEXT: movq %mm2, (%rdi)
+; X64-NEXT: retq
  %1 = insertelement <8 x i8> undef, i8 %a0, i32 0
  %2 = insertelement <8 x i8> %1, i8 %a1, i32 1
  %3 = insertelement <8 x i8> %2, i8 %a2, i32 2
@@ -645,64 +558,22 @@ define void @build_v8i8_0uuuuzzz(ptr%p0, i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
   ret void
 }
 
 define void @build_v8i8_0zzzzzzu(ptr%p0, i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7) nounwind {
-; X86-MMX-LABEL: build_v8i8_0zzzzzzu:
-; X86-MMX: # %bb.0:
-; X86-MMX-NEXT: pushl %ebp
-; X86-MMX-NEXT: movl %esp, %ebp
-; X86-MMX-NEXT: andl $-8, %esp
-; X86-MMX-NEXT: subl $8, %esp
-; X86-MMX-NEXT: movl 8(%ebp), %eax
-; X86-MMX-NEXT: movzbl 12(%ebp), %ecx
-; X86-MMX-NEXT: movl %ecx, (%esp)
-; X86-MMX-NEXT: movzbl %al, %ecx
-; X86-MMX-NEXT: shll $24, %ecx
-; X86-MMX-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-MMX-NEXT: movq (%esp), %mm0
-; X86-MMX-NEXT: paddd %mm0, %mm0
-; X86-MMX-NEXT: movq %mm0, (%eax)
-; X86-MMX-NEXT: movl %ebp, %esp
-; X86-MMX-NEXT: popl %ebp
-; X86-MMX-NEXT: retl
-;
-; X86-SSE-LABEL: build_v8i8_0zzzzzzu:
-; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-SSE-NEXT: movd %ecx, %xmm0
-; X86-SSE-NEXT: movdq2q %xmm0, %mm0
-; X86-SSE-NEXT: paddd %mm0, %mm0
-; X86-SSE-NEXT: movq %mm0, (%eax)
-; X86-SSE-NEXT: retl
+; X86-LABEL: build_v8i8_0zzzzzzu:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movd %eax, %mm0
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%eax)
+; X86-NEXT: retl
 ;
-; X64-SSE2-LABEL: build_v8i8_0zzzzzzu:
-; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: movd %esi, %xmm0
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; X64-SSE2-NEXT: por %xmm0, %xmm1
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; X64-SSE2-NEXT: movdq2q %xmm1, %mm0
-; X64-SSE2-NEXT: paddd %mm0, %mm0
-; X64-SSE2-NEXT: movq %mm0, (%rdi)
-; X64-SSE2-NEXT: retq
-;
-; X64-SSSE3-LABEL: build_v8i8_0zzzzzzu:
-; X64-SSSE3: # %bb.0:
-; X64-SSSE3-NEXT: movd %esi, %xmm0
-; X64-SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
-; X64-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,zero
-; X64-SSSE3-NEXT: movdq2q %xmm0, %mm0
-; X64-SSSE3-NEXT: paddd %mm0, %mm0
-; X64-SSSE3-NEXT: movq %mm0, (%rdi)
-; X64-SSSE3-NEXT: retq
-;
-; X64-AVX-LABEL: build_v8i8_0zzzzzzu:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpinsrb $0, %esi, %xmm0, %xmm0
-; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX-NEXT: movdq2q %xmm0, %mm0
-; X64-AVX-NEXT: paddd %mm0, %mm0
-; X64-AVX-NEXT: movq %mm0, (%rdi)
-; X64-AVX-NEXT: retq
+; X64-LABEL: build_v8i8_0zzzzzzu:
+; X64: # %bb.0:
+; X64-NEXT: movzbl %sil, %eax
+; X64-NEXT: movd %eax, %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movq %mm0, (%rdi)
+; X64-NEXT: retq
  %1 = insertelement <8 x i8> undef, i8 %a0, i32 0
  %2 = insertelement <8 x i8> %1, i8 0, i32 1
  %3 = insertelement <8 x i8> %2, i8 0, i32 2
diff --git a/llvm/test/CodeGen/X86/pr62286.ll b/llvm/test/CodeGen/X86/pr62286.ll
index 2d1b7fcbf0239..ce03f8fad4a19 100644
--- a/llvm/test/CodeGen/X86/pr62286.ll
+++ b/llvm/test/CodeGen/X86/pr62286.ll
@@ -28,8 +28,9 @@ define i64 @PR62286(i32 %a) {
 ; AVX1-NEXT: vmovd %edi, %xmm0
 ; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
 ; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
@@ -58,12 +59,13 @@ define i64 @PR62286(i32 %a) {
 ; AVX512-LABEL: PR62286:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vmovd %edi, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
-; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512-NEXT: movw $4369, %ax # imm = 0x1111
+; AVX512-NEXT: movb $8, %al
 ; AVX512-NEXT: kmovd %eax, %k1
-; AVX512-NEXT: vpaddd %zmm0, %zmm0, %zmm1 {%k1}
-; AVX512-NEXT: vpmovsxdq %ymm1, %zmm0
+; AVX512-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z}
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
 ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index c16985e081334..5b61de5a3b772 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -3062,7 +3062,6 @@ define <8 x i16> @shuffle_scalar_to_vector_extract(ptr %p0, ptr %p1, ptr %p2) {
 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: shuffle_scalar_to_vector_extract:
@@ -3080,16 +3079,13 @@ define <8 x i16> @shuffle_scalar_to_vector_extract(ptr %p0, ptr %p1, ptr %p2) {
 ; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSSE3-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: shuffle_scalar_to_vector_extract:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
+; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
 ; SSE41-NEXT: pextrw $4, %xmm0, %eax
 ; SSE41-NEXT: pextrw $7, %xmm0, %ecx
-; SSE41-NEXT: pxor %xmm1, %xmm1
 ; SSE41-NEXT: pxor %xmm0, %xmm0
 ; SSE41-NEXT: pinsrw $1, %eax, %xmm0
 ; SSE41-NEXT: movl $65531, %eax # imm = 0xFFFB
@@ -3099,25 +3095,22 @@ define <8 x i16> @shuffle_scalar_to_vector_extract(ptr %p0, ptr %p1, ptr %p2) {
 ; SSE41-NEXT: pinsrw $5, %eax, %xmm0
 ; SSE41-NEXT: movsbl (%rdx), %eax
 ; SSE41-NEXT: pinsrw $6, %eax, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6],xmm1[7]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: shuffle_scalar_to_vector_extract:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX-NEXT: vpmovsxbw (%rdi), %xmm0
 ; AVX-NEXT: vpextrw $4, %xmm0, %eax
 ; AVX-NEXT: vpextrw $7, %xmm0, %ecx
 ; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $1, %eax, %xmm0, %xmm1
+; AVX-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
 ; AVX-NEXT: movl $65531, %eax # imm = 0xFFFB
-; AVX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
 ; AVX-NEXT: movsbl (%rsi), %eax
-; AVX-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
 ; AVX-NEXT: movsbl (%rdx), %eax
-; AVX-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6],xmm0[7]
+; AVX-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
 ; AVX-NEXT: retq
  %tmp = load <8 x i8>, ptr %p0, align 1
  %tmp1 = sext <8 x i8> %tmp to <8 x i16>
diff --git a/llvm/test/CodeGen/X86/vector-trunc.ll b/llvm/test/CodeGen/X86/vector-trunc.ll
index 27f4a3ecb206f..46f770a349d96 100644
--- a/llvm/test/CodeGen/X86/vector-trunc.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc.ll
@@ -2018,8 +2018,7 @@ define i16 @PR66194(i8 %q) {
 ; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
 ; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
 ; AVX2-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastw {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1]
-; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
@@ -2028,120 +2027,33 @@ define i16 @PR66194(i8 %q) {
 ; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vmovd %xmm0, %eax
 ; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
-; AVX512F-LABEL: PR66194:
-; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: xorl %eax, %eax
-; AVX512F-NEXT: xorl %ecx, %ecx
-; AVX512F-NEXT: testb %dil, %dil
-; AVX512F-NEXT: setne %al
-; AVX512F-NEXT: sete %cl
-; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX512F-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX512F-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0
-; AVX512F-NEXT: vpbroadcastw {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512F-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512F-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512F-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT: vmovd %xmm0, %eax
-; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: PR66194:
-; AVX512VL: # %bb.0: # %entry
-; AVX512VL-NEXT: xorl %eax, %eax
-; AVX512VL-NEXT: xorl %ecx, %ecx
-; AVX512VL-NEXT: testb %dil, %dil
-; AVX512VL-NEXT: setne %al
-; AVX512VL-NEXT: sete %cl
-; AVX512VL-NEXT: vmovd %eax, %xmm0
-; AVX512VL-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX512VL-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX512VL-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0
-; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1]
-; AVX512VL-NEXT: vpaddw %xmm0, %xmm1, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512VL-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512VL-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512VL-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovd %xmm0, %eax
-; AVX512VL-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
-;
-; AVX512BW-LABEL: PR66194:
-; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: xorl %eax, %eax
-; AVX512BW-NEXT: xorl %ecx, %ecx
-; AVX512BW-NEXT: testb %dil, %dil
-; AVX512BW-NEXT: setne %al
-; AVX512BW-NEXT: sete %cl
-; AVX512BW-NEXT: vmovd %eax, %xmm0
-; AVX512BW-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0
-; AVX512BW-NEXT: vpbroadcastw {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1]
-; AVX512BW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512BW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512BW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512BW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovd %xmm0, %eax
-; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: PR66194:
-; AVX512BWVL: # %bb.0: # %entry
-; AVX512BWVL-NEXT: xorl %eax, %eax
-; AVX512BWVL-NEXT: xorl %ecx, %ecx
-; AVX512BWVL-NEXT: testb %dil, %dil
-; AVX512BWVL-NEXT: setne %al
-; AVX512BWVL-NEXT: sete %cl
-; AVX512BWVL-NEXT: vmovd %eax, %xmm0
-; AVX512BWVL-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpbroadcastw {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1]
-; AVX512BWVL-NEXT: vpaddw %xmm0, %xmm1, %xmm0
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512BWVL-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512BWVL-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512BWVL-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vmovd %xmm0, %eax
-; AVX512BWVL-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
+; AVX512-LABEL: PR66194:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: xorl %ecx, %ecx
+; AVX512-NEXT: testb %dil, %dil
+; AVX512-NEXT: setne %al
+; AVX512-NEXT: sete %cl
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: retq
 entry:
  %cmp12.i.13 = icmp ne i8 %q, 0
  %cond.i15.13 = zext i1 %cmp12.i.13 to i16
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index f0d6c1327aea6..572ed314ab31d 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -3283,10 +3283,9 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm2
 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
+; AVX-NEXT: vmovdqa %xmm1, (%rcx)
 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
 ; AVX-NEXT: vmovdqa %xmm2, 32(%rcx)
-; AVX-NEXT: vmovdqa %xmm1, (%rcx)
-; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
 ;
 ; AVX2-LABEL: vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3:

_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits