Author: Akshat Oke
Date: 2024-11-14T19:30:05+05:30
New Revision: 0cbf819ea5cba324f25edf135eb3ca00b9aa8d5e

URL: https://github.com/llvm/llvm-project/commit/0cbf819ea5cba324f25edf135eb3ca00b9aa8d5e
DIFF: https://github.com/llvm/llvm-project/commit/0cbf819ea5cba324f25edf135eb3ca00b9aa8d5e.diff

LOG: Revert "[NFC][CodeGen] Clang format MachineSink.cpp (#114027)"

This reverts commit 43bef75fd65083349ec888fadfb99987f7804d18.

Added:

Modified:
    llvm/lib/CodeGen/MachineSink.cpp

Removed:


################################################################################
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
index c470bd71dfb29f..105042a9976635 100644
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -64,14 +64,14 @@ using namespace llvm;
 #define DEBUG_TYPE "machine-sink"
 
 static cl::opt<bool>
-    SplitEdges("machine-sink-split",
-               cl::desc("Split critical edges during machine sinking"),
-               cl::init(true), cl::Hidden);
+SplitEdges("machine-sink-split",
+           cl::desc("Split critical edges during machine sinking"),
+           cl::init(true), cl::Hidden);
 
-static cl::opt<bool> UseBlockFreqInfo(
-    "machine-sink-bfi",
-    cl::desc("Use block frequency info to find successors to sink"),
-    cl::init(true), cl::Hidden);
+static cl::opt<bool>
+UseBlockFreqInfo("machine-sink-bfi",
+                 cl::desc("Use block frequency info to find successors to sink"),
+                 cl::init(true), cl::Hidden);
 
 static cl::opt<unsigned> SplitEdgeProbabilityThreshold(
     "machine-sink-split-probability-threshold",
@@ -102,180 +102,180 @@ static cl::opt<bool>
 
 static cl::opt<unsigned> SinkIntoCycleLimit(
     "machine-sink-cycle-limit",
-    cl::desc(
-        "The maximum number of instructions considered for cycle sinking."),
+    cl::desc("The maximum number of instructions considered for cycle sinking."),
     cl::init(50), cl::Hidden);
 
-STATISTIC(NumSunk, "Number of machine instructions sunk");
-STATISTIC(NumCycleSunk, "Number of machine instructions sunk into a cycle");
-STATISTIC(NumSplit, "Number of critical edges split");
+STATISTIC(NumSunk,      "Number of machine instructions sunk");
+STATISTIC(NumCycleSunk, "Number of machine instructions sunk into a cycle");
+STATISTIC(NumSplit,     "Number of critical edges split");
 STATISTIC(NumCoalesces, "Number of copies coalesced");
 STATISTIC(NumPostRACopySink, "Number of copies sunk after RA");
 
 namespace {
 
-class MachineSinking : public MachineFunctionPass {
-  const TargetSubtargetInfo *STI = nullptr;
-  const TargetInstrInfo *TII = nullptr;
-  const TargetRegisterInfo *TRI = nullptr;
-  MachineRegisterInfo *MRI = nullptr;      // Machine register information
-  MachineDominatorTree *DT = nullptr;      // Machine dominator tree
-  MachinePostDominatorTree *PDT = nullptr; // Machine post dominator tree
-  MachineCycleInfo *CI = nullptr;
-  ProfileSummaryInfo *PSI = nullptr;
-  MachineBlockFrequencyInfo *MBFI = nullptr;
-  const MachineBranchProbabilityInfo *MBPI = nullptr;
-  AliasAnalysis *AA = nullptr;
-  RegisterClassInfo RegClassInfo;
-
-  // Remember which edges have been considered for breaking.
-  SmallSet<std::pair<MachineBasicBlock *, MachineBasicBlock *>, 8>
-      CEBCandidates;
-  // Memorize the register that also wanted to sink into the same block along
-  // a different critical edge.
-  // {register to sink, sink-to block} -> the first sink-from block.
-  // We're recording the first sink-from block because that (critical) edge
-  // was deferred until we see another register that's going to sink into the
-  // same block.
-  DenseMap<std::pair<Register, MachineBasicBlock *>, MachineBasicBlock *>
-      CEMergeCandidates;
-  // Remember which edges we are about to split.
-  // This is different from CEBCandidates since those edges
-  // will be split.
-  SetVector<std::pair<MachineBasicBlock *, MachineBasicBlock *>> ToSplit;
-
-  DenseSet<Register> RegsToClearKillFlags;
-
-  using AllSuccsCache =
-      SmallDenseMap<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>;
-
-  /// DBG_VALUE pointer and flag. The flag is true if this DBG_VALUE is
-  /// post-dominated by another DBG_VALUE of the same variable location.
-  /// This is necessary to detect sequences such as:
-  ///     %0 = someinst
-  ///     DBG_VALUE %0, !123, !DIExpression()
-  ///     %1 = anotherinst
-  ///     DBG_VALUE %1, !123, !DIExpression()
-  /// Where if %0 were to sink, the DBG_VAUE should not sink with it, as that
-  /// would re-order assignments.
-  using SeenDbgUser = PointerIntPair<MachineInstr *, 1>;
-
-  /// Record of DBG_VALUE uses of vregs in a block, so that we can identify
-  /// debug instructions to sink.
-  SmallDenseMap<unsigned, TinyPtrVector<SeenDbgUser>> SeenDbgUsers;
-
-  /// Record of debug variables that have had their locations set in the
-  /// current block.
-  DenseSet<DebugVariable> SeenDbgVars;
-
-  DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>, bool>
-      HasStoreCache;
-
-  DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>,
-           SmallVector<MachineInstr *>>
-      StoreInstrCache;
-
-  /// Cached BB's register pressure.
-  DenseMap<const MachineBasicBlock *, std::vector<unsigned>>
-      CachedRegisterPressure;
-
-  bool EnableSinkAndFold;
-
-public:
-  static char ID; // Pass identification
-
-  MachineSinking() : MachineFunctionPass(ID) {
-    initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
-  }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
+  class MachineSinking : public MachineFunctionPass {
+    const TargetSubtargetInfo *STI = nullptr;
+    const TargetInstrInfo *TII = nullptr;
+    const TargetRegisterInfo *TRI = nullptr;
+    MachineRegisterInfo *MRI = nullptr;      // Machine register information
+    MachineDominatorTree *DT = nullptr;      // Machine dominator tree
+    MachinePostDominatorTree *PDT = nullptr; // Machine post dominator tree
+    MachineCycleInfo *CI = nullptr;
+    ProfileSummaryInfo *PSI = nullptr;
+    MachineBlockFrequencyInfo *MBFI = nullptr;
+    const MachineBranchProbabilityInfo *MBPI = nullptr;
+    AliasAnalysis *AA = nullptr;
+    RegisterClassInfo RegClassInfo;
+
+    // Remember which edges have been considered for breaking.
+    SmallSet<std::pair<MachineBasicBlock*, MachineBasicBlock*>, 8>
+    CEBCandidates;
+    // Memorize the register that also wanted to sink into the same block along
+    // a different critical edge.
+    // {register to sink, sink-to block} -> the first sink-from block.
+    // We're recording the first sink-from block because that (critical) edge
+    // was deferred until we see another register that's going to sink into the
+    // same block.
+    DenseMap<std::pair<Register, MachineBasicBlock *>, MachineBasicBlock *>
+        CEMergeCandidates;
+    // Remember which edges we are about to split.
+    // This is different from CEBCandidates since those edges
+    // will be split.
+    SetVector<std::pair<MachineBasicBlock *, MachineBasicBlock *>> ToSplit;
+
+    DenseSet<Register> RegsToClearKillFlags;
+
+    using AllSuccsCache =
+        SmallDenseMap<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>;
+
+    /// DBG_VALUE pointer and flag. The flag is true if this DBG_VALUE is
+    /// post-dominated by another DBG_VALUE of the same variable location.
+    /// This is necessary to detect sequences such as:
+    ///     %0 = someinst
+    ///     DBG_VALUE %0, !123, !DIExpression()
+    ///     %1 = anotherinst
+    ///     DBG_VALUE %1, !123, !DIExpression()
+    /// Where if %0 were to sink, the DBG_VAUE should not sink with it, as that
+    /// would re-order assignments.
+    using SeenDbgUser = PointerIntPair<MachineInstr *, 1>;
+
+    /// Record of DBG_VALUE uses of vregs in a block, so that we can identify
+    /// debug instructions to sink.
+    SmallDenseMap<unsigned, TinyPtrVector<SeenDbgUser>> SeenDbgUsers;
+
+    /// Record of debug variables that have had their locations set in the
+    /// current block.
+    DenseSet<DebugVariable> SeenDbgVars;
+
+    DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>, bool>
+        HasStoreCache;
+
+    DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>,
+             SmallVector<MachineInstr *>>
+        StoreInstrCache;
+
+    /// Cached BB's register pressure.
+    DenseMap<const MachineBasicBlock *, std::vector<unsigned>>
+        CachedRegisterPressure;
+
+    bool EnableSinkAndFold;
+
+  public:
+    static char ID; // Pass identification
+
+    MachineSinking() : MachineFunctionPass(ID) {
+      initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
+    }
 
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    MachineFunctionPass::getAnalysisUsage(AU);
-    AU.addRequired<AAResultsWrapperPass>();
-    AU.addRequired<MachineDominatorTreeWrapperPass>();
-    AU.addRequired<MachinePostDominatorTreeWrapperPass>();
-    AU.addRequired<MachineCycleInfoWrapperPass>();
-    AU.addRequired<MachineBranchProbabilityInfoWrapperPass>();
-    AU.addPreserved<MachineCycleInfoWrapperPass>();
-    AU.addPreserved<MachineLoopInfoWrapperPass>();
-    AU.addRequired<ProfileSummaryInfoWrapperPass>();
-    if (UseBlockFreqInfo)
-      AU.addRequired<MachineBlockFrequencyInfoWrapperPass>();
-    AU.addRequired<TargetPassConfig>();
-  }
+    bool runOnMachineFunction(MachineFunction &MF) override;
+
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
+      MachineFunctionPass::getAnalysisUsage(AU);
+      AU.addRequired<AAResultsWrapperPass>();
+      AU.addRequired<MachineDominatorTreeWrapperPass>();
+      AU.addRequired<MachinePostDominatorTreeWrapperPass>();
+      AU.addRequired<MachineCycleInfoWrapperPass>();
+      AU.addRequired<MachineBranchProbabilityInfoWrapperPass>();
+      AU.addPreserved<MachineCycleInfoWrapperPass>();
+      AU.addPreserved<MachineLoopInfoWrapperPass>();
+      AU.addRequired<ProfileSummaryInfoWrapperPass>();
+      if (UseBlockFreqInfo)
+        AU.addRequired<MachineBlockFrequencyInfoWrapperPass>();
+      AU.addRequired<TargetPassConfig>();
+    }
 
-  void releaseMemory() override {
-    CEBCandidates.clear();
-    CEMergeCandidates.clear();
-  }
+    void releaseMemory() override {
+      CEBCandidates.clear();
+      CEMergeCandidates.clear();
+    }
 
-private:
-  bool ProcessBlock(MachineBasicBlock &MBB);
-  void ProcessDbgInst(MachineInstr &MI);
-  bool isLegalToBreakCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
-                                  MachineBasicBlock *To, bool BreakPHIEdge);
-  bool isWorthBreakingCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
+  private:
+    bool ProcessBlock(MachineBasicBlock &MBB);
+    void ProcessDbgInst(MachineInstr &MI);
+    bool isLegalToBreakCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
+                                    MachineBasicBlock *To, bool BreakPHIEdge);
+    bool isWorthBreakingCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
+                                     MachineBasicBlock *To,
+                                     MachineBasicBlock *&DeferredFromBlock);
+
+    bool hasStoreBetween(MachineBasicBlock *From, MachineBasicBlock *To,
+                         MachineInstr &MI);
+
+    /// Postpone the splitting of the given critical
+    /// edge (\p From, \p To).
+    ///
+    /// We do not split the edges on the fly. Indeed, this invalidates
+    /// the dominance information and thus triggers a lot of updates
+    /// of that information underneath.
+    /// Instead, we postpone all the splits after each iteration of
+    /// the main loop. That way, the information is at least valid
+    /// for the lifetime of an iteration.
+    ///
+    /// \return True if the edge is marked as toSplit, false otherwise.
+    /// False can be returned if, for instance, this is not profitable.
+    bool PostponeSplitCriticalEdge(MachineInstr &MI,
+                                   MachineBasicBlock *From,
                                    MachineBasicBlock *To,
-                                   MachineBasicBlock *&DeferredFromBlock);
-
-  bool hasStoreBetween(MachineBasicBlock *From, MachineBasicBlock *To,
-                       MachineInstr &MI);
-
-  /// Postpone the splitting of the given critical
-  /// edge (\p From, \p To).
-  ///
-  /// We do not split the edges on the fly. Indeed, this invalidates
-  /// the dominance information and thus triggers a lot of updates
-  /// of that information underneath.
-  /// Instead, we postpone all the splits after each iteration of
-  /// the main loop. That way, the information is at least valid
-  /// for the lifetime of an iteration.
-  ///
-  /// \return True if the edge is marked as toSplit, false otherwise.
-  /// False can be returned if, for instance, this is not profitable.
-  bool PostponeSplitCriticalEdge(MachineInstr &MI, MachineBasicBlock *From,
-                                 MachineBasicBlock *To, bool BreakPHIEdge);
-  bool SinkInstruction(MachineInstr &MI, bool &SawStore,
-                       AllSuccsCache &AllSuccessors);
-
-  /// If we sink a COPY inst, some debug users of it's destination may no
-  /// longer be dominated by the COPY, and will eventually be dropped.
-  /// This is easily rectified by forwarding the non-dominated debug uses
-  /// to the copy source.
-  void SalvageUnsunkDebugUsersOfCopy(MachineInstr &,
-                                     MachineBasicBlock *TargetBlock);
-  bool AllUsesDominatedByBlock(Register Reg, MachineBasicBlock *MBB,
-                               MachineBasicBlock *DefMBB, bool &BreakPHIEdge,
-                               bool &LocalUse) const;
-  MachineBasicBlock *FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
-                                      bool &BreakPHIEdge,
-                                      AllSuccsCache &AllSuccessors);
-
-  void FindCycleSinkCandidates(MachineCycle *Cycle, MachineBasicBlock *BB,
-                               SmallVectorImpl<MachineInstr *> &Candidates);
-  bool SinkIntoCycle(MachineCycle *Cycle, MachineInstr &I);
-
-  bool isProfitableToSinkTo(Register Reg, MachineInstr &MI,
-                            MachineBasicBlock *MBB,
-                            MachineBasicBlock *SuccToSinkTo,
-                            AllSuccsCache &AllSuccessors);
-
-  bool PerformTrivialForwardCoalescing(MachineInstr &MI,
-                                       MachineBasicBlock *MBB);
-
-  bool PerformSinkAndFold(MachineInstr &MI, MachineBasicBlock *MBB);
-
-  SmallVector<MachineBasicBlock *, 4> &
-  GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
-                         AllSuccsCache &AllSuccessors) const;
-
-  std::vector<unsigned> &getBBRegisterPressure(const MachineBasicBlock &MBB);
-
-  bool registerPressureSetExceedsLimit(unsigned NRegs,
-                                       const TargetRegisterClass *RC,
-                                       const MachineBasicBlock &MBB);
-};
+                                   bool BreakPHIEdge);
+    bool SinkInstruction(MachineInstr &MI, bool &SawStore,
+                         AllSuccsCache &AllSuccessors);
+
+    /// If we sink a COPY inst, some debug users of it's destination may no
+    /// longer be dominated by the COPY, and will eventually be dropped.
+    /// This is easily rectified by forwarding the non-dominated debug uses
+    /// to the copy source.
+    void SalvageUnsunkDebugUsersOfCopy(MachineInstr &,
+                                       MachineBasicBlock *TargetBlock);
+    bool AllUsesDominatedByBlock(Register Reg, MachineBasicBlock *MBB,
+                                 MachineBasicBlock *DefMBB, bool &BreakPHIEdge,
+                                 bool &LocalUse) const;
+    MachineBasicBlock *FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
+                       bool &BreakPHIEdge, AllSuccsCache &AllSuccessors);
+
+    void FindCycleSinkCandidates(MachineCycle *Cycle, MachineBasicBlock *BB,
+                                 SmallVectorImpl<MachineInstr *> &Candidates);
+    bool SinkIntoCycle(MachineCycle *Cycle, MachineInstr &I);
+
+    bool isProfitableToSinkTo(Register Reg, MachineInstr &MI,
+                              MachineBasicBlock *MBB,
+                              MachineBasicBlock *SuccToSinkTo,
+                              AllSuccsCache &AllSuccessors);
+
+    bool PerformTrivialForwardCoalescing(MachineInstr &MI,
+                                         MachineBasicBlock *MBB);
+
+    bool PerformSinkAndFold(MachineInstr &MI, MachineBasicBlock *MBB);
+
+    SmallVector<MachineBasicBlock *, 4> &
+    GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
+                           AllSuccsCache &AllSuccessors) const;
+
+    std::vector<unsigned> &getBBRegisterPressure(const MachineBasicBlock &MBB);
+
+    bool registerPressureSetExceedsLimit(unsigned NRegs,
+                                         const TargetRegisterClass *RC,
+                                         const MachineBasicBlock &MBB);
+  };
 
 } // end anonymous namespace
 
@@ -283,15 +283,15 @@ char MachineSinking::ID = 0;
 
 char &llvm::MachineSinkingID = MachineSinking::ID;
 
-INITIALIZE_PASS_BEGIN(MachineSinking, DEBUG_TYPE, "Machine code sinking", false,
-                      false)
+INITIALIZE_PASS_BEGIN(MachineSinking, DEBUG_TYPE,
+                      "Machine code sinking", false, false)
 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfoWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(MachineCycleInfoWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
-INITIALIZE_PASS_END(MachineSinking, DEBUG_TYPE, "Machine code sinking", false,
-                    false)
+INITIALIZE_PASS_END(MachineSinking, DEBUG_TYPE,
+                    "Machine code sinking", false, false)
 
 /// Return true if a target defined block prologue instruction interferes
 /// with a sink candidate.
@@ -642,7 +642,7 @@ bool MachineSinking::AllUsesDominatedByBlock(Register Reg,
     if (UseInst->isPHI()) {
       // PHI nodes use the operand in the predecessor block, not the block with
       // the PHI.
-      UseBlock = UseInst->getOperand(OpNo + 1).getMBB();
+      UseBlock = UseInst->getOperand(OpNo+1).getMBB();
     } else if (UseBlock == DefMBB) {
       LocalUse = true;
       return false;
@@ -743,7 +743,7 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
     CEBCandidates.clear();
     CEMergeCandidates.clear();
     ToSplit.clear();
-    for (auto &MBB : MF)
+    for (auto &MBB: MF)
       MadeChange |= ProcessBlock(MBB);
 
     // If we have anything we marked as toSplit, split it now.
@@ -764,8 +764,7 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
         LLVM_DEBUG(dbgs() << " *** Not legal to break critical edge\n");
     }
     // If this iteration over the code changed anything, keep iterating.
-    if (!MadeChange)
-      break;
+    if (!MadeChange) break;
     EverMadeChange = true;
   }
 
@@ -817,8 +816,7 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
   // Don't bother sinking code out of unreachable blocks. In addition to being
   // unprofitable, it can also lead to infinite looping, because in an
   // unreachable cycle there may be nowhere to stop.
-  if (!DT->isReachableFromEntry(&MBB))
-    return false;
+  if (!DT->isReachableFromEntry(&MBB)) return false;
 
   bool MadeChange = false;
 
@@ -830,7 +828,7 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
     --I;
     bool ProcessedBegin, SawStore = false;
     do {
-      MachineInstr &MI = *I; // The instruction to sink.
+      MachineInstr &MI = *I;  // The instruction to sink.
 
       // Predecrement I (if it's not begin) so that it isn't invalidated by
      // sinking.
@@ -926,9 +924,8 @@ bool MachineSinking::isWorthBreakingCriticalEdge(
     }
   }
 
-  if (From->isSuccessor(To) &&
-      MBPI->getEdgeProbability(From, To) <=
-          BranchProbability(SplitEdgeProbabilityThreshold, 100))
+  if (From->isSuccessor(To) && MBPI->getEdgeProbability(From, To) <=
+      BranchProbability(SplitEdgeProbabilityThreshold, 100))
     return true;
 
   // MI is cheap, we probably don't want to break the critical edge for it.
@@ -1102,7 +1099,7 @@ bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI,
                                           MachineBasicBlock *MBB,
                                           MachineBasicBlock *SuccToSinkTo,
                                           AllSuccsCache &AllSuccessors) {
-  assert(SuccToSinkTo && "Invalid SinkTo Candidate BB");
+  assert (SuccToSinkTo && "Invalid SinkTo Candidate BB");
 
   if (MBB == SuccToSinkTo)
     return false;
@@ -1136,8 +1133,8 @@ bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI,
 
   MachineCycle *MCycle = CI->getCycle(MBB);
 
-  // If the instruction is not inside a cycle, it is not profitable to sink MI
-  // to a post dominate block SuccToSinkTo.
+  // If the instruction is not inside a cycle, it is not profitable to sink MI to
+  // a post dominate block SuccToSinkTo.
   if (!MCycle)
     return false;
 
@@ -1153,8 +1150,7 @@ bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI,
 
     if (Reg.isPhysical()) {
       // Don't handle non-constant and non-ignorable physical register uses.
-      if (MO.isUse() && !MRI->isConstantPhysReg(Reg) &&
-          !TII->isIgnorableUse(MO))
+      if (MO.isUse() && !MRI->isConstantPhysReg(Reg) && !TII->isIgnorableUse(MO))
        return false;
       continue;
     }
@@ -1242,7 +1238,7 @@ MachineBasicBlock *
 MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
                                  bool &BreakPHIEdge,
                                  AllSuccsCache &AllSuccessors) {
-  assert(MBB && "Invalid MachineBasicBlock!");
+  assert (MBB && "Invalid MachineBasicBlock!");
 
   // loop over all the operands of the specified instruction. If there is
   // anything we can't handle, bail out.
@@ -1251,12 +1247,10 @@ MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
   // decide.
   MachineBasicBlock *SuccToSinkTo = nullptr;
   for (const MachineOperand &MO : MI.operands()) {
-    if (!MO.isReg())
-      continue; // Ignore non-register operands.
+    if (!MO.isReg()) continue;  // Ignore non-register operands.
 
     Register Reg = MO.getReg();
-    if (Reg == 0)
-      continue;
+    if (Reg == 0) continue;
 
     if (Reg.isPhysical()) {
       if (MO.isUse()) {
@@ -1271,8 +1265,7 @@ MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
       }
     } else {
       // Virtual register uses are always safe to sink.
-      if (MO.isUse())
-        continue;
+      if (MO.isUse()) continue;
 
       // If it's not safe to move defs of the register class, then abort.
       if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
@@ -1284,8 +1277,8 @@ MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
         // If a previous operand picked a block to sink to, then this operand
         // must be sinkable to the same block.
         bool LocalUse = false;
-        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB, BreakPHIEdge,
-                                     LocalUse))
+        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
+                                     BreakPHIEdge, LocalUse))
           return nullptr;
 
         continue;
@@ -1298,8 +1291,8 @@ MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
       for (MachineBasicBlock *SuccBlock :
            GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
         bool LocalUse = false;
-        if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB, BreakPHIEdge,
-                                    LocalUse)) {
+        if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
+                                    BreakPHIEdge, LocalUse)) {
          SuccToSinkTo = SuccBlock;
          break;
        }
@@ -1534,7 +1527,7 @@ bool MachineSinking::hasStoreBetween(MachineBasicBlock *From,
         for (auto *DomBB : HandledDomBlocks) {
           if (DomBB != BB && DT->dominates(DomBB, BB))
             HasStoreCache[std::make_pair(DomBB, To)] = true;
-          else if (DomBB != BB && DT->dominates(BB, DomBB))
+          else if(DomBB != BB && DT->dominates(BB, DomBB))
             HasStoreCache[std::make_pair(From, DomBB)] = true;
         }
         HasStoreCache[BlockPair] = true;
@@ -1548,7 +1541,7 @@ bool MachineSinking::hasStoreBetween(MachineBasicBlock *From,
         for (auto *DomBB : HandledDomBlocks) {
           if (DomBB != BB && DT->dominates(DomBB, BB))
             HasStoreCache[std::make_pair(DomBB, To)] = true;
-          else if (DomBB != BB && DT->dominates(BB, DomBB))
+          else if(DomBB != BB && DT->dominates(BB, DomBB))
             HasStoreCache[std::make_pair(From, DomBB)] = true;
         }
         HasStoreCache[BlockPair] = true;
@@ -1614,8 +1607,8 @@ bool MachineSinking::SinkIntoCycle(MachineCycle *Cycle, MachineInstr &I) {
         CanSink = false;
         break;
       }
-      LLVM_DEBUG(dbgs() << "CycleSink: Setting nearest common dom block: "
-                        << printMBBReference(*SinkBlock) << "\n");
+      LLVM_DEBUG(dbgs() << "CycleSink: Setting nearest common dom block: " <<
+                 printMBBReference(*SinkBlock) << "\n");
     }
 
     if (!CanSink) {
@@ -1741,8 +1734,8 @@ bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
     // Mark this edge as to be split.
     // If the edge can actually be split, the next iteration of the main loop
     // will sink MI in the newly created block.
-    bool Status = PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo,
-                                            BreakPHIEdge);
+    bool Status =
+        PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
     if (!Status)
       LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
                            "break critical edge\n");
@@ -1755,8 +1748,8 @@ bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
     // BreakPHIEdge is true if all the uses are in the successor MBB being
     // sunken into and they are all PHI nodes. In this case, machine-sink must
     // break the critical edge first.
-    bool Status =
-        PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
+    bool Status = PostponeSplitCriticalEdge(MI, ParentBlock,
+                                            SuccToSinkTo, BreakPHIEdge);
     if (!Status)
       LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
                            "break critical edge\n");

_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits