Author: Kazu Hirata
Date: 2021-01-23T11:25:27-08:00
New Revision: e4847a7fcf777eedc748d2476323726960ab29b7
URL: https://github.com/llvm/llvm-project/commit/e4847a7fcf777eedc748d2476323726960ab29b7
DIFF: https://github.com/llvm/llvm-project/commit/e4847a7fcf777eedc748d2476323726960ab29b7.diff

LOG: Revert "[Target] Use llvm::append_range (NFC)"

This reverts commit cc7a23828657f35f706343982cf96bb6583d4d73.

The X86WinEHState.cpp hunk seems to break certain builds.

Added: 

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
    llvm/lib/Target/ARM/ARMParallelDSP.cpp
    llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
    llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
    llvm/lib/Target/Hexagon/RDFDeadCode.cpp
    llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
    llvm/lib/Target/X86/X86PartialReduction.cpp
    llvm/lib/Target/X86/X86WinEHState.cpp

Removed: 

################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
index 51af25050950..4fca8bec7423 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
@@ -81,7 +81,8 @@ recursivelyVisitUsers(GlobalValue &GV,
       continue;
     }
 
-    append_range(Stack, U->users());
+    for (User *UU : U->users())
+      Stack.push_back(UU);
   }
 }
 
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index e959c5f0f8d3..2ce1ac51c018 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6208,10 +6208,12 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
   SmallVector<SDValue, 26> Ops;
   if (BaseOpcode->Store || BaseOpcode->Atomic)
     Ops.push_back(VData); // vdata
-  if (UseNSA)
-    append_range(Ops, VAddrs);
-  else
+  if (UseNSA) {
+    for (const SDValue &Addr : VAddrs)
+      Ops.push_back(Addr);
+  } else {
     Ops.push_back(VAddr);
+  }
   Ops.push_back(Op.getOperand(ArgOffset + Intr->RsrcIndex));
   if (BaseOpcode->Sampler)
     Ops.push_back(Op.getOperand(ArgOffset + Intr->SampIndex));
 
diff --git a/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp b/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
index 9570680ad9cb..18ab7d7cd555 100644
--- a/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
@@ -177,8 +177,10 @@ class PhiIncomingAnalysis {
         }
       }
 
-      if (Divergent && PDT.dominates(&DefBlock, MBB))
-        append_range(Stack, MBB->successors());
+      if (Divergent && PDT.dominates(&DefBlock, MBB)) {
+        for (MachineBasicBlock *Succ : MBB->successors())
+          Stack.push_back(Succ);
+      }
     }
 
     while (!Stack.empty()) {
@@ -187,7 +189,8 @@ class PhiIncomingAnalysis {
        continue;
 
       ReachableOrdered.push_back(MBB);
-      append_range(Stack, MBB->successors());
+      for (MachineBasicBlock *Succ : MBB->successors())
+        Stack.push_back(Succ);
     }
 
     for (MachineBasicBlock *MBB : ReachableOrdered) {
 
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 397979b4ab1e..f6f8597f3a69 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3556,7 +3556,8 @@ static bool allUsersAreInFunction(const Value *V, const Function *F) {
   while (!Worklist.empty()) {
     auto *U = Worklist.pop_back_val();
     if (isa<ConstantExpr>(U)) {
-      append_range(Worklist, U->users());
+      for (auto *UU : U->users())
+        Worklist.push_back(UU);
       continue;
     }
 
@@ -19125,7 +19126,8 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
     SmallVector<Value *, 6> Ops;
 
     Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
-    append_range(Ops, Shuffles);
+    for (auto S : Shuffles)
+      Ops.push_back(S);
     Ops.push_back(Builder.getInt32(SI->getAlignment()));
     Builder.CreateCall(VstNFunc, Ops);
   } else {
@@ -19141,7 +19143,8 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
     SmallVector<Value *, 6> Ops;
 
     Ops.push_back(Builder.CreateBitCast(BaseAddr, EltPtrTy));
-    append_range(Ops, Shuffles);
+    for (auto S : Shuffles)
+      Ops.push_back(S);
 
     for (unsigned F = 0; F < Factor; F++) {
       Ops.push_back(Builder.getInt32(F));
       Builder.CreateCall(VstNFunc, Ops);
 
diff --git a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
index 61a924078f29..2b53f57a7f09 100644
--- a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
+++ b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
@@ -143,7 +143,8 @@ namespace {
       // Insert exit blocks.
       SmallVector<MachineBasicBlock*, 2> ExitBlocks;
       ML.getExitBlocks(ExitBlocks);
-      append_range(Order, ExitBlocks);
+      for (auto *MBB : ExitBlocks)
+        Order.push_back(MBB);
 
       // Then add the loop body.
       Search(ML.getHeader());
 
diff --git a/llvm/lib/Target/ARM/ARMParallelDSP.cpp b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
index 9a7c1f541aa2..9a3776fe64a7 100644
--- a/llvm/lib/Target/ARM/ARMParallelDSP.cpp
+++ b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
@@ -202,7 +202,8 @@ namespace {
   public:
     WidenedLoad(SmallVectorImpl<LoadInst*> &Lds, LoadInst *Wide)
       : NewLd(Wide) {
-      append_range(Loads, Lds);
+      for (auto *I : Lds)
+        Loads.push_back(I);
     }
     LoadInst *getLoad() {
       return NewLd;
 
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
index 29b75814da6e..cf5ea5d53af6 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
@@ -315,7 +315,8 @@ HexagonTargetLowering::getInt(unsigned IntId, MVT ResTy, ArrayRef<SDValue> Ops,
                               const SDLoc &dl, SelectionDAG &DAG) const {
   SmallVector<SDValue,4> IntOps;
   IntOps.push_back(DAG.getConstant(IntId, dl, MVT::i32));
-  append_range(IntOps, Ops);
+  for (const SDValue &Op : Ops)
+    IntOps.push_back(Op);
   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, ResTy, IntOps);
 }
 
diff --git a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
index c8c66ebb69cd..60c2feb766cc 100644
--- a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -574,9 +574,12 @@ void HexagonSplitDoubleRegs::collectIndRegs(LoopRegMap &IRM) {
   LoopVector WorkQ;
 
-  append_range(WorkQ, *MLI);
-  for (unsigned i = 0; i < WorkQ.size(); ++i)
-    append_range(WorkQ, *WorkQ[i]);
+  for (auto I : *MLI)
+    WorkQ.push_back(I);
+  for (unsigned i = 0; i < WorkQ.size(); ++i) {
+    for (auto I : *WorkQ[i])
+      WorkQ.push_back(I);
+  }
 
   USet Rs;
   for (unsigned i = 0, n = WorkQ.size(); i < n; ++i) {
 
diff --git a/llvm/lib/Target/Hexagon/RDFDeadCode.cpp b/llvm/lib/Target/Hexagon/RDFDeadCode.cpp
index 894bdf38fe17..5a98debd3c00 100644
--- a/llvm/lib/Target/Hexagon/RDFDeadCode.cpp
+++ b/llvm/lib/Target/Hexagon/RDFDeadCode.cpp
@@ -195,7 +195,8 @@ bool DeadCodeElimination::erase(const SetVector<NodeId> &Nodes) {
       // If it's a code node, add all ref nodes from it.
       uint16_t Kind = BA.Addr->getKind();
       if (Kind == NodeAttrs::Stmt || Kind == NodeAttrs::Phi) {
-        append_range(DRNs, NodeAddr<CodeNode*>(BA).Addr->members(DFG));
+        for (auto N : NodeAddr<CodeNode*>(BA).Addr->members(DFG))
+          DRNs.push_back(N);
         DINs.push_back(DFG.addr<InstrNode*>(I));
       } else {
         llvm_unreachable("Unexpected code node");
 
diff --git a/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp b/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
index b9518d6d7064..77ea232b0662 100644
--- a/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -148,7 +148,9 @@ static bool verifyCTRBranch(MachineBasicBlock *MBB,
       return false;
     }
 
-    append_range(Preds, MBB->predecessors());
+    for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
+         PIE = MBB->pred_end(); PI != PIE; ++PI)
+      Preds.push_back(*PI);
   }
 
   do {
 
diff --git a/llvm/lib/Target/X86/X86PartialReduction.cpp b/llvm/lib/Target/X86/X86PartialReduction.cpp
index babd923e7496..8784a3df1773 100644
--- a/llvm/lib/Target/X86/X86PartialReduction.cpp
+++ b/llvm/lib/Target/X86/X86PartialReduction.cpp
@@ -392,7 +392,8 @@ static void collectLeaves(Value *Root, SmallVectorImpl<Instruction *> &Leaves) {
         break;
 
       // Push incoming values to the worklist.
-      append_range(Worklist, PN->incoming_values());
+      for (Value *InV : PN->incoming_values())
+        Worklist.push_back(InV);
 
       continue;
     }
@@ -401,7 +402,8 @@ static void collectLeaves(Value *Root, SmallVectorImpl<Instruction *> &Leaves) {
     if (BO->getOpcode() == Instruction::Add) {
       // Simple case. Single use, just push its operands to the worklist.
       if (BO->hasNUses(BO == Root ? 2 : 1)) {
-        append_range(Worklist, BO->operands());
+        for (Value *Op : BO->operands())
+          Worklist.push_back(Op);
         continue;
       }
 
@@ -424,7 +426,8 @@ static void collectLeaves(Value *Root, SmallVectorImpl<Instruction *> &Leaves) {
         continue;
 
       // The phi forms a loop with this Add, push its operands.
-      append_range(Worklist, BO->operands());
+      for (Value *Op : BO->operands())
+        Worklist.push_back(Op);
     }
   }
 }
 
diff --git a/llvm/lib/Target/X86/X86WinEHState.cpp b/llvm/lib/Target/X86/X86WinEHState.cpp
index 7db1504b8ac0..8d8bd5e6b326 100644
--- a/llvm/lib/Target/X86/X86WinEHState.cpp
+++ b/llvm/lib/Target/X86/X86WinEHState.cpp
@@ -704,7 +704,8 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
     // enqueue it's successors to see if we can infer their states.
     InitialStates.insert({BB, PredState});
     FinalStates.insert({BB, PredState});
-    append_range(Worklist, successors(BB));
+    for (BasicBlock *SuccBB : successors(BB))
+      Worklist.push_back(SuccBB);
   }
 
   // Try to hoist stores from successors.


_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
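For readers unfamiliar with the helper being reverted: llvm::append_range (from llvm/ADT/STLExtras.h) appends an entire range to a container in one call, so each hunk above simply restores the equivalent element-by-element push_back loop; the affected code's behaviour is unchanged, as the original change was NFC. The sketch below is not LLVM code: it uses std::vector and a simplified stand-in for the helper, purely to illustrate that the two forms produce the same result.

#include <iostream>
#include <vector>

// Simplified stand-in for llvm::append_range, which forwards to
// Container::insert; written with std:: iterators so this compiles
// outside the LLVM tree.
template <typename Container, typename RangeTy>
void append_range(Container &C, RangeTy &&R) {
  C.insert(C.end(), std::begin(R), std::end(R));
}

int main() {
  const std::vector<int> Succs = {3, 4, 5};

  // Form removed by this revert: append the whole range in one call.
  std::vector<int> A = {1, 2};
  append_range(A, Succs);

  // Form restored by this revert: an explicit push_back loop.
  std::vector<int> B = {1, 2};
  for (int S : Succs)
    B.push_back(S);

  // Both containers now hold {1, 2, 3, 4, 5}.
  std::cout << (A == B ? "identical" : "different") << '\n';
  return 0;
}

Per the log message, only the X86WinEHState.cpp hunk of the original commit appeared to break certain builds; the whole commit is reverted here rather than just that hunk.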