https://github.com/Ris-Bali updated https://github.com/llvm/llvm-project/pull/71220
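The patch below renames the IR-level atomic expansion pass and gives it a new-pass-manager wrapper (ExpandAtomicPass) next to the legacy ExpandAtomicLegacy pass, registered in PassRegistry.def as "expand-atomic". As a rough illustration of how the ported pass would be driven from C++ under the new pass manager, here is a minimal sketch in the project's language; it assumes the headers land as written in this patch, and the helper runExpandAtomics plus its surrounding setup are purely illustrative, not part of the change.

// Illustrative only: drive the ported expand-atomic pass through the new PM.
// Assumes llvm/CodeGen/ExpandAtomic.h exists as added by this patch.
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/CodeGen/ExpandAtomic.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

static void runExpandAtomics(Module &M, TargetMachine *TM) {
  // Standard new-PM analysis manager setup.
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;

  PassBuilder PB(TM);
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  // ExpandAtomicPass is a function pass constructed with the TargetMachine,
  // matching the ExpandAtomicPass(const TargetMachine *) constructor added
  // in ExpandAtomic.h, so wrap it in a module-to-function adaptor.
  FunctionPassManager FPM;
  FPM.addPass(ExpandAtomicPass(TM));

  ModulePassManager MPM;
  MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
  MPM.run(M, MAM);
}

The same pipeline should be reachable from the command line as `opt -passes=expand-atomic`, which is the spelling the updated RUN lines in the tests use in place of the legacy `-atomic-expand` flag.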
>From 0948e11b508e3f978f76a639f27101c8825250c7 Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsb...@gmail.com>
Date: Sun, 14 Jan 2024 22:50:06 +0530
Subject: [PATCH 1/4] Port AtomicExpandPass to new PM

---
 llvm/include/llvm/CodeGen/AtomicExpandUtils.h | 4 +-
 llvm/include/llvm/CodeGen/ExpandAtomic.h | 30 ++++
 .../llvm/CodeGen/MachinePassRegistry.def | 2 +-
 llvm/include/llvm/CodeGen/Passes.h | 8 +-
 .../llvm/CodeGen/TargetSubtargetInfo.h | 2 +-
 llvm/include/llvm/InitializePasses.h | 2 +-
 llvm/lib/CodeGen/CMakeLists.txt | 2 +-
 llvm/lib/CodeGen/CodeGen.cpp | 2 +-
 ...micExpandPass.cpp => ExpandAtomicPass.cpp} | 149 +++++++++++-------
 llvm/lib/CodeGen/TargetSubtargetInfo.cpp | 2 +-
 llvm/lib/Passes/PassBuilder.cpp | 1 +
 llvm/lib/Passes/PassRegistry.def | 1 +
 .../Target/AArch64/AArch64TargetMachine.cpp | 2 +-
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 2 +-
 llvm/lib/Target/ARC/ARCTargetMachine.cpp | 2 +-
 llvm/lib/Target/ARM/ARMTargetMachine.cpp | 2 +-
 llvm/lib/Target/BPF/BPFTargetMachine.cpp | 2 +-
 llvm/lib/Target/CSKY/CSKYTargetMachine.cpp | 2 +-
 .../Target/Hexagon/HexagonTargetMachine.cpp | 2 +-
 llvm/lib/Target/Lanai/LanaiTargetMachine.cpp | 2 +-
 .../LoongArch/LoongArchTargetMachine.cpp | 2 +-
 llvm/lib/Target/M68k/M68kTargetMachine.cpp | 2 +-
 .../lib/Target/MSP430/MSP430TargetMachine.cpp | 2 +-
 llvm/lib/Target/Mips/MipsTargetMachine.cpp | 2 +-
 llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp | 2 +-
 .../PowerPC/PPCExpandAtomicPseudoInsts.cpp | 2 +-
 llvm/lib/Target/PowerPC/PPCTargetMachine.cpp | 2 +-
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp | 2 +-
 llvm/lib/Target/Sparc/SparcTargetMachine.cpp | 2 +-
 .../Target/SystemZ/SystemZTargetMachine.cpp | 2 +-
 llvm/lib/Target/VE/VETargetMachine.cpp | 2 +-
 .../WebAssembly/WebAssemblySubtarget.cpp | 2 +-
 .../Target/WebAssembly/WebAssemblySubtarget.h | 2 +-
 .../WebAssembly/WebAssemblyTargetMachine.cpp | 2 +-
 llvm/lib/Target/X86/X86TargetMachine.cpp | 2 +-
 llvm/lib/Target/XCore/XCoreTargetMachine.cpp | 2 +-
 .../test/CodeGen/AMDGPU/idemponent-atomics.ll | 2 +-
 .../CodeGen/AMDGPU/private-memory-atomics.ll | 2 +-
 .../AtomicExpand/AArch64/atomicrmw-fp.ll | 2 +-
 .../AArch64/expand-atomicrmw-xchg-fp.ll | 4 +-
 .../AtomicExpand/AArch64/pcsections.ll | 2 +-
 .../AMDGPU/expand-atomic-i16-system.ll | 2 +-
 .../AtomicExpand/AMDGPU/expand-atomic-i16.ll | 4 +-
 .../AMDGPU/expand-atomic-i8-system.ll | 2 +-
 .../AtomicExpand/AMDGPU/expand-atomic-i8.ll | 4 +-
 ...and-atomic-rmw-fadd-flat-specialization.ll | 8 +-
 .../AMDGPU/expand-atomic-rmw-fadd.ll | 12 +-
 .../AMDGPU/expand-atomic-rmw-fmax.ll | 4 +-
 .../AMDGPU/expand-atomic-rmw-fmin.ll | 4 +-
 .../AMDGPU/expand-atomic-rmw-fsub.ll | 4 +-
 .../AMDGPU/expand-atomic-rmw-nand.ll | 4 +-
 .../expand-atomic-simplify-cfg-CAS-block.ll | 2 +-
 .../AtomicExpand/AMDGPU/unaligned-atomic.ll | 2 +-
 .../AtomicExpand/ARM/atomic-expansion-v7.ll | 2 +-
 .../AtomicExpand/ARM/atomic-expansion-v8.ll | 2 +-
 .../AtomicExpand/ARM/atomicrmw-fp.ll | 2 +-
 .../AtomicExpand/ARM/cmpxchg-weak.ll | 2 +-
 .../AtomicExpand/Hexagon/atomicrmw-fp.ll | 2 +-
 .../AtomicExpand/LoongArch/atomicrmw-fp.ll | 2 +-
 .../LoongArch/load-store-atomic.ll | 4 +-
 .../AtomicExpand/Mips/atomicrmw-fp.ll | 2 +-
 .../AtomicExpand/PowerPC/atomicrmw-fp.ll | 2 +-
 .../AtomicExpand/PowerPC/cfence-double.ll | 4 +-
 .../AtomicExpand/PowerPC/cfence-float.ll | 4 +-
 .../AtomicExpand/PowerPC/cmpxchg.ll | 4 +-
 .../AtomicExpand/PowerPC/issue55983.ll | 4 +-
 .../AtomicExpand/RISCV/atomicrmw-fp.ll | 2 +-
 .../Transforms/AtomicExpand/SPARC/libcalls.ll | 2 +-
 .../Transforms/AtomicExpand/SPARC/partword.ll | 2 +-
 .../AtomicExpand/X86/expand-atomic-libcall.ll | 2 +-
 .../X86/expand-atomic-non-integer.ll | 2 +-
 .../AtomicExpand/X86/expand-atomic-rmw-fp.ll | 2 +-
 .../X86/expand-atomic-rmw-initial-load.ll | 2 +-
 .../AtomicExpand/X86/expand-atomic-xchg-fp.ll | 2 +-
 llvm/tools/opt/opt.cpp | 2 -
 .../gn/secondary/llvm/lib/CodeGen/BUILD.gn | 2 +-
 76 files changed, 219 insertions(+), 154 deletions(-)
 create mode 100644 llvm/include/llvm/CodeGen/ExpandAtomic.h
 rename llvm/lib/CodeGen/{AtomicExpandPass.cpp => ExpandAtomicPass.cpp} (95%)

diff --git a/llvm/include/llvm/CodeGen/AtomicExpandUtils.h b/llvm/include/llvm/CodeGen/AtomicExpandUtils.h
index 1cb410a0c31c69..851492678aeba5 100644
--- a/llvm/include/llvm/CodeGen/AtomicExpandUtils.h
+++ b/llvm/include/llvm/CodeGen/AtomicExpandUtils.h
@@ -34,7 +34,7 @@ using CreateCmpXchgInstFun =
 /// instructions directly into a platform specific intrinsics (because, say,
 /// those intrinsics don't exist). If such a pass is able to expand cmpxchg
 /// instructions directly however, then, with this function, it could avoid two
-/// extra module passes (avoiding passes by `-atomic-expand` and itself). A
+/// extra module passes (avoiding passes by `-expand-atomic` and itself). A
 /// specific example would be PNaCl's `RewriteAtomics` pass.
 ///
 /// Given: atomicrmw some_op iN* %addr, iN %incr ordering
@@ -46,7 +46,7 @@ using CreateCmpXchgInstFun =
 /// loop:
 ///   %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
 ///   %new = some_op iN %loaded, %incr
-/// ; This is what -atomic-expand will produce using this function on i686
+/// ; This is what -expand-atomic will produce using this function on i686
 /// targets:
 ///   %pair = cmpxchg iN* %addr, iN %loaded, iN %new_val
 ///   %new_loaded = extractvalue { iN, i1 } %pair, 0
diff --git a/llvm/include/llvm/CodeGen/ExpandAtomic.h b/llvm/include/llvm/CodeGen/ExpandAtomic.h
new file mode 100644
index 00000000000000..4ba49f8886ca94
--- /dev/null
+++ b/llvm/include/llvm/CodeGen/ExpandAtomic.h
@@ -0,0 +1,30 @@
+//===-- ExpandAtomic.h - Expand Atomic Instructions -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EXPANDATOMIC_H
+#define LLVM_CODEGEN_EXPANDATOMIC_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+class TargetMachine;
+
+class ExpandAtomicPass : public PassInfoMixin<ExpandAtomicPass> {
+private:
+  const TargetMachine *TM;
+
+public:
+  ExpandAtomicPass(const TargetMachine *TM) : TM(TM) {}
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_EXPANDATOMIC_H
diff --git a/llvm/include/llvm/CodeGen/MachinePassRegistry.def b/llvm/include/llvm/CodeGen/MachinePassRegistry.def
index e789747036ef9a..97cb359630f736 100644
--- a/llvm/include/llvm/CodeGen/MachinePassRegistry.def
+++ b/llvm/include/llvm/CodeGen/MachinePassRegistry.def
@@ -135,7 +135,7 @@ MACHINE_FUNCTION_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis,
 #ifndef DUMMY_FUNCTION_PASS
 #define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
 #endif
-DUMMY_FUNCTION_PASS("atomic-expand", AtomicExpandPass, ())
+DUMMY_FUNCTION_PASS("expand-atomic", ExpandAtomicPass, ())
 #undef DUMMY_FUNCTION_PASS
 
 #ifndef DUMMY_MACHINE_MODULE_PASS
diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index bbfb8a0dbe26a4..4f63c32d9fd2eb 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -41,10 +41,10 @@ class FileSystem;
 // List of target independent CodeGen pass IDs.
 namespace llvm {
 
-  /// AtomicExpandPass - At IR level this pass replace atomic instructions with
+  /// ExpandAtomicPass - At IR level this pass replace atomic instructions with
   /// __atomic_* library calls, or target specific instruction which implement the
   /// same semantics in a way which better fits the target backend.
-  FunctionPass *createAtomicExpandPass();
+  FunctionPass *createExpandAtomicPass();
 
   /// createUnreachableBlockEliminationPass - The LLVM code generator does not
   /// work well with unreachable basic blocks (what live ranges make sense for a
@@ -101,9 +101,9 @@ namespace llvm {
   /// handling of complex number arithmetic
   FunctionPass *createComplexDeinterleavingPass(const TargetMachine *TM);
 
-  /// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg
+  /// ExpandAtomicID -- Lowers atomic operations in terms of either cmpxchg
   /// load-linked/store-conditional loops.
-  extern char &AtomicExpandID;
+  extern char &ExpandAtomicID;
 
   /// MachineLoopInfo - This pass is a loop analysis pass.
   extern char &MachineLoopInfoID;
diff --git a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
index 55ef95c2854319..da1fd6737b796f 100644
--- a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -215,7 +215,7 @@ class TargetSubtargetInfo : public MCSubtargetInfo {
   virtual bool enablePostRAMachineScheduler() const;
 
   /// True if the subtarget should run the atomic expansion pass.
-  virtual bool enableAtomicExpand() const;
+  virtual bool enableExpandAtomic() const;
 
   /// True if the subtarget should run the indirectbr expansion pass.
virtual bool enableIndirectBrExpand() const; diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h index 3db639a6872407..efcfa080912b82 100644 --- a/llvm/include/llvm/InitializePasses.h +++ b/llvm/include/llvm/InitializePasses.h @@ -52,7 +52,6 @@ void initializeAAResultsWrapperPassPass(PassRegistry&); void initializeAlwaysInlinerLegacyPassPass(PassRegistry&); void initializeAssignmentTrackingAnalysisPass(PassRegistry &); void initializeAssumptionCacheTrackerPass(PassRegistry&); -void initializeAtomicExpandPass(PassRegistry&); void initializeBasicBlockPathCloningPass(PassRegistry &); void initializeBasicBlockSectionsProfileReaderWrapperPassPass(PassRegistry &); void initializeBasicBlockSectionsPass(PassRegistry &); @@ -101,6 +100,7 @@ void initializeEarlyMachineLICMPass(PassRegistry&); void initializeEarlyTailDuplicatePass(PassRegistry&); void initializeEdgeBundlesPass(PassRegistry&); void initializeEHContGuardCatchretPass(PassRegistry &); +void initializeExpandAtomicPass(PassRegistry&); void initializeExpandLargeFpConvertLegacyPassPass(PassRegistry&); void initializeExpandLargeDivRemLegacyPassPass(PassRegistry&); void initializeExpandMemCmpLegacyPassPass(PassRegistry &); diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt index df2d1831ee5fdb..c237574bdf542f 100644 --- a/llvm/lib/CodeGen/CMakeLists.txt +++ b/llvm/lib/CodeGen/CMakeLists.txt @@ -40,7 +40,6 @@ add_llvm_component_library(LLVMCodeGen AllocationOrder.cpp Analysis.cpp AssignmentTrackingAnalysis.cpp - AtomicExpandPass.cpp BasicTargetTransformInfo.cpp BranchFolding.cpp BranchRelaxation.cpp @@ -69,6 +68,7 @@ add_llvm_component_library(LLVMCodeGen EdgeBundles.cpp EHContGuardCatchret.cpp ExecutionDomainFix.cpp + ExpandAtomicPass.cpp ExpandLargeDivRem.cpp ExpandLargeFpConvert.cpp ExpandMemCmp.cpp diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp index 418066452c1724..230776984af0ce 100644 --- a/llvm/lib/CodeGen/CodeGen.cpp +++ b/llvm/lib/CodeGen/CodeGen.cpp @@ -19,7 +19,6 @@ using namespace llvm; /// initializeCodeGen - Initialize all passes linked into the CodeGen library. void llvm::initializeCodeGen(PassRegistry &Registry) { initializeAssignmentTrackingAnalysisPass(Registry); - initializeAtomicExpandPass(Registry); initializeBasicBlockPathCloningPass(Registry); initializeBasicBlockSectionsPass(Registry); initializeBranchFolderPassPass(Registry); @@ -39,6 +38,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) { initializeEarlyIfPredicatorPass(Registry); initializeEarlyMachineLICMPass(Registry); initializeEarlyTailDuplicatePass(Registry); + initializeExpandAtomicLegacyPass(Registry); initializeExpandLargeDivRemLegacyPassPass(Registry); initializeExpandLargeFpConvertLegacyPassPass(Registry); initializeExpandMemCmpLegacyPassPass(Registry); diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/ExpandAtomicPass.cpp similarity index 95% rename from llvm/lib/CodeGen/AtomicExpandPass.cpp rename to llvm/lib/CodeGen/ExpandAtomicPass.cpp index ccf3e9ec649210..5f8e069bafc7a6 100644 --- a/llvm/lib/CodeGen/AtomicExpandPass.cpp +++ b/llvm/lib/CodeGen/ExpandAtomicPass.cpp @@ -1,4 +1,4 @@ -//===- AtomicExpandPass.cpp - Expand atomic instructions ------------------===// +//===- ExpandAtomicPass.cpp - Expand atomic instructions ------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
@@ -19,6 +19,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/Analysis/InstSimplifyFolder.h" #include "llvm/Analysis/OptimizationRemarkEmitter.h" +#include "llvm/CodeGen/ExpandAtomic.h" #include "llvm/CodeGen/AtomicExpandUtils.h" #include "llvm/CodeGen/RuntimeLibcalls.h" #include "llvm/CodeGen/TargetLowering.h" @@ -55,23 +56,14 @@ using namespace llvm; -#define DEBUG_TYPE "atomic-expand" +#define DEBUG_TYPE "expand-atomic" namespace { -class AtomicExpand : public FunctionPass { +class ExpandAtomicImpl { const TargetLowering *TLI = nullptr; const DataLayout *DL = nullptr; -public: - static char ID; // Pass identification, replacement for typeid - - AtomicExpand() : FunctionPass(ID) { - initializeAtomicExpandPass(*PassRegistry::getPassRegistry()); - } - - bool runOnFunction(Function &F) override; - private: bool bracketInstWithFences(Instruction *I, AtomicOrdering Order); IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL); @@ -124,6 +116,20 @@ class AtomicExpand : public FunctionPass { friend bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun CreateCmpXchg); + +public: + bool run(Function &F, const TargetMachine *TM); +}; + +class ExpandAtomicLegacy : public FunctionPass { +public: + static char ID; // Pass identification, replacement for typeid + + ExpandAtomicLegacy() : FunctionPass(ID) { + initializeExpandAtomicLegacyPass(*PassRegistry::getPassRegistry()); + } + + bool runOnFunction(Function &F) override; }; // IRBuilder to be used for replacement atomic instructions. @@ -138,14 +144,15 @@ struct ReplacementIRBuilder : IRBuilder<InstSimplifyFolder> { } // end anonymous namespace -char AtomicExpand::ID = 0; +char ExpandAtomicLegacy::ID = 0; -char &llvm::AtomicExpandID = AtomicExpand::ID; +char &llvm::ExpandAtomicID = ExpandAtomicLegacy::ID; -INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions", false, - false) - -FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); } +INITIALIZE_PASS_BEGIN(ExpandAtomicLegacy, DEBUG_TYPE, + "Expand Atomic instructions", false, false) +INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) +INITIALIZE_PASS_END(ExpandAtomicLegacy, DEBUG_TYPE, + "Expand Atomic instructions", false, false) // Helper functions to retrieve the size of atomic instructions. 
static unsigned getAtomicOpSize(LoadInst *LI) { @@ -179,14 +186,9 @@ static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) { Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8; } -bool AtomicExpand::runOnFunction(Function &F) { - auto *TPC = getAnalysisIfAvailable<TargetPassConfig>(); - if (!TPC) - return false; - - auto &TM = TPC->getTM<TargetMachine>(); - const auto *Subtarget = TM.getSubtargetImpl(F); - if (!Subtarget->enableAtomicExpand()) +bool ExpandAtomicImpl::run(Function &F, const TargetMachine *TM) { + const auto *Subtarget = TM->getSubtargetImpl(F); + if (!Subtarget->enableExpandAtomic()) return false; TLI = Subtarget->getTargetLowering(); DL = &F.getParent()->getDataLayout(); @@ -340,7 +342,39 @@ bool AtomicExpand::runOnFunction(Function &F) { return MadeChange; } -bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) { +bool ExpandAtomicLegacy::runOnFunction(Function &F) { + if (skipFunction(F)) + return false; + + auto *TPC = getAnalysisIfAvailable<TargetPassConfig>(); + if (!TPC) + return false; + + auto *TM = &TPC->getTM<TargetMachine>(); + + ExpandAtomicImpl AE; + return AE.run(F, TM); +} + +FunctionPass *llvm::createExpandAtomicLegacyPass() { + return new ExpandAtomicLegacy(); +} + +PreservedAnalyses ExpandAtomicPass::run(Function &F, + FunctionAnalysisManager &AM) { + ExpandAtomicImpl AE; + + bool Changed = AE.run(F, TM); + if (!Changed) + return PreservedAnalyses::all(); + + PreservedAnalyses PA; + PA.preserveSet<CFGAnalyses>(); + return PA; +} + +bool ExpandAtomicImpl::bracketInstWithFences(Instruction *I, + AtomicOrdering Order) { ReplacementIRBuilder Builder(I, *DL); auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order); @@ -355,8 +389,8 @@ bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) { } /// Get the iX type with the same bitwidth as T. -IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T, - const DataLayout &DL) { +IntegerType * +ExpandAtomicImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) { EVT VT = TLI->getMemValueType(DL, T); unsigned BitWidth = VT.getStoreSizeInBits(); assert(BitWidth == VT.getSizeInBits() && "must be a power of two"); @@ -366,7 +400,7 @@ IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T, /// Convert an atomic load of a non-integral type to an integer load of the /// equivalent bitwidth. See the function comment on /// convertAtomicStoreToIntegerType for background. 
-LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) { +LoadInst *ExpandAtomicImpl::convertAtomicLoadToIntegerType(LoadInst *LI) { auto *M = LI->getModule(); Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout()); @@ -387,7 +421,7 @@ LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) { } AtomicRMWInst * -AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) { +ExpandAtomicImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) { auto *M = RMWI->getModule(); Type *NewTy = getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout()); @@ -414,7 +448,7 @@ AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) { return NewRMWI; } -bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) { +bool ExpandAtomicImpl::tryExpandAtomicLoad(LoadInst *LI) { switch (TLI->shouldExpandAtomicLoadInIR(LI)) { case TargetLoweringBase::AtomicExpansionKind::None: return false; @@ -436,7 +470,7 @@ bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) { } } -bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) { +bool ExpandAtomicImpl::tryExpandAtomicStore(StoreInst *SI) { switch (TLI->shouldExpandAtomicStoreInIR(SI)) { case TargetLoweringBase::AtomicExpansionKind::None: return false; @@ -451,7 +485,7 @@ bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) { } } -bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) { +bool ExpandAtomicImpl::expandAtomicLoadToLL(LoadInst *LI) { ReplacementIRBuilder Builder(LI, *DL); // On some architectures, load-linked instructions are atomic for larger @@ -467,7 +501,7 @@ bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) { return true; } -bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) { +bool ExpandAtomicImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) { ReplacementIRBuilder Builder(LI, *DL); AtomicOrdering Order = LI->getOrdering(); if (Order == AtomicOrdering::Unordered) @@ -496,7 +530,7 @@ bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) { /// instruction select from the original atomic store, but as a migration /// mechanism, we convert back to the old format which the backends understand. /// Each backend will need individual work to recognize the new format. -StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) { +StoreInst *ExpandAtomicImpl::convertAtomicStoreToIntegerType(StoreInst *SI) { ReplacementIRBuilder Builder(SI, *DL); auto *M = SI->getModule(); Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(), @@ -514,7 +548,7 @@ StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) { return NewSI; } -void AtomicExpand::expandAtomicStore(StoreInst *SI) { +void ExpandAtomicImpl::expandAtomicStore(StoreInst *SI) { // This function is only called on atomic stores that are too large to be // atomic if implemented as a native store. So we replace them by an // atomic swap, that can be implemented for example as a ldrex/strex on ARM @@ -561,7 +595,7 @@ static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr, NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy); } -bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) { +bool ExpandAtomicImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) { LLVMContext &Ctx = AI->getModule()->getContext(); TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI); switch (Kind) { @@ -843,7 +877,7 @@ static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op, /// way as a typical atomicrmw expansion. 
The only difference here is /// that the operation inside of the loop may operate upon only a /// part of the value. -void AtomicExpand::expandPartwordAtomicRMW( +void ExpandAtomicImpl::expandPartwordAtomicRMW( AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) { AtomicOrdering MemOpOrder = AI->getOrdering(); SyncScope::ID SSID = AI->getSyncScopeID(); @@ -887,7 +921,7 @@ void AtomicExpand::expandPartwordAtomicRMW( } // Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width. -AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) { +AtomicRMWInst *ExpandAtomicImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) { ReplacementIRBuilder Builder(AI, *DL); AtomicRMWInst::BinOp Op = AI->getOperation(); @@ -922,7 +956,7 @@ AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) { return NewAI; } -bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) { +bool ExpandAtomicImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) { // The basic idea here is that we're expanding a cmpxchg of a // smaller memory size up to a word-sized cmpxchg. To do this, we // need to add a retry-loop for strong cmpxchg, so that @@ -1047,7 +1081,7 @@ bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) { return true; } -void AtomicExpand::expandAtomicOpToLLSC( +void ExpandAtomicImpl::expandAtomicOpToLLSC( Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign, AtomicOrdering MemOpOrder, function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) { @@ -1059,7 +1093,7 @@ void AtomicExpand::expandAtomicOpToLLSC( I->eraseFromParent(); } -void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) { +void ExpandAtomicImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) { ReplacementIRBuilder Builder(AI, *DL); PartwordMaskValues PMV = @@ -1085,7 +1119,8 @@ void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) { AI->eraseFromParent(); } -void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) { +void ExpandAtomicImpl::expandAtomicCmpXchgToMaskedIntrinsic( + AtomicCmpXchgInst *CI) { ReplacementIRBuilder Builder(CI, *DL); PartwordMaskValues PMV = createMaskInstrs( @@ -1112,7 +1147,7 @@ void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) { CI->eraseFromParent(); } -Value *AtomicExpand::insertRMWLLSCLoop( +Value *ExpandAtomicImpl::insertRMWLLSCLoop( IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign, AtomicOrdering MemOpOrder, function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) { @@ -1168,7 +1203,7 @@ Value *AtomicExpand::insertRMWLLSCLoop( /// way to represent a pointer cmpxchg so that we can update backends one by /// one. 
AtomicCmpXchgInst * -AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) { +ExpandAtomicImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) { auto *M = CI->getModule(); Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(), M->getDataLayout()); @@ -1201,7 +1236,7 @@ AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) { return NewCI; } -bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) { +bool ExpandAtomicImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) { AtomicOrdering SuccessOrder = CI->getSuccessOrdering(); AtomicOrdering FailureOrder = CI->getFailureOrdering(); Value *Addr = CI->getPointerOperand(); @@ -1447,7 +1482,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) { return true; } -bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) { +bool ExpandAtomicImpl::isIdempotentRMW(AtomicRMWInst *RMWI) { auto C = dyn_cast<ConstantInt>(RMWI->getValOperand()); if (!C) return false; @@ -1467,7 +1502,7 @@ bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) { } } -bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) { +bool ExpandAtomicImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) { if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) { tryExpandAtomicLoad(ResultingLoad); return true; @@ -1475,7 +1510,7 @@ bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) { return false; } -Value *AtomicExpand::insertRMWCmpXchgLoop( +Value *ExpandAtomicImpl::insertRMWCmpXchgLoop( IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign, AtomicOrdering MemOpOrder, SyncScope::ID SSID, function_ref<Value *(IRBuilderBase &, Value *)> PerformOp, @@ -1536,7 +1571,7 @@ Value *AtomicExpand::insertRMWCmpXchgLoop( return NewLoaded; } -bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) { +bool ExpandAtomicImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) { unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8; unsigned ValueSize = getAtomicOpSize(CI); @@ -1567,7 +1602,7 @@ bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, // FIXME: If FP exceptions are observable, we should force them off for the // loop for the FP atomics. 
- Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop( + Value *Loaded = ExpandAtomicImpl::insertRMWCmpXchgLoop( Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(), AI->getOrdering(), AI->getSyncScopeID(), [&](IRBuilderBase &Builder, Value *Loaded) { @@ -1601,7 +1636,7 @@ static bool canUseSizedAtomicCall(unsigned Size, Align Alignment, Size <= LargestSize; } -void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) { +void ExpandAtomicImpl::expandAtomicLoadToLibcall(LoadInst *I) { static const RTLIB::Libcall Libcalls[6] = { RTLIB::ATOMIC_LOAD, RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2, RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16}; @@ -1614,7 +1649,7 @@ void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) { report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load"); } -void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) { +void ExpandAtomicImpl::expandAtomicStoreToLibcall(StoreInst *I) { static const RTLIB::Libcall Libcalls[6] = { RTLIB::ATOMIC_STORE, RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2, RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16}; @@ -1627,7 +1662,7 @@ void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) { report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store"); } -void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) { +void ExpandAtomicImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) { static const RTLIB::Libcall Libcalls[6] = { RTLIB::ATOMIC_COMPARE_EXCHANGE, RTLIB::ATOMIC_COMPARE_EXCHANGE_1, RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4, @@ -1705,7 +1740,7 @@ static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) { llvm_unreachable("Unexpected AtomicRMW operation."); } -void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) { +void ExpandAtomicImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) { ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation()); unsigned Size = getAtomicOpSize(I); @@ -1744,7 +1779,7 @@ void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) { // ATOMIC libcalls to be emitted. All of the other arguments besides // 'I' are extracted from the Instruction subclass by the // caller. Depending on the particular call, some will be null. 
-bool AtomicExpand::expandAtomicOpToLibcall( +bool ExpandAtomicImpl::expandAtomicOpToLibcall( Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand, Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering, AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) { diff --git a/llvm/lib/CodeGen/TargetSubtargetInfo.cpp b/llvm/lib/CodeGen/TargetSubtargetInfo.cpp index 6c97bc0568bdee..a8fa14bebbe686 100644 --- a/llvm/lib/CodeGen/TargetSubtargetInfo.cpp +++ b/llvm/lib/CodeGen/TargetSubtargetInfo.cpp @@ -24,7 +24,7 @@ TargetSubtargetInfo::TargetSubtargetInfo( TargetSubtargetInfo::~TargetSubtargetInfo() = default; -bool TargetSubtargetInfo::enableAtomicExpand() const { +bool TargetSubtargetInfo::enableExpandAtomic() const { return true; } diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index d0f3a55a12b056..c40d8aa9bbfb16 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -77,6 +77,7 @@ #include "llvm/CodeGen/CallBrPrepare.h" #include "llvm/CodeGen/CodeGenPrepare.h" #include "llvm/CodeGen/DwarfEHPrepare.h" +#include "llvm/CodeGen/ExpandAtomic.h" #include "llvm/CodeGen/ExpandLargeDivRem.h" #include "llvm/CodeGen/ExpandLargeFpConvert.h" #include "llvm/CodeGen/ExpandMemCmp.h" diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def index 0b53b59787dd20..abf3ae0dfbbedd 100644 --- a/llvm/lib/Passes/PassRegistry.def +++ b/llvm/lib/Passes/PassRegistry.def @@ -312,6 +312,7 @@ FUNCTION_PASS("dot-post-dom", PostDomPrinter()) FUNCTION_PASS("dot-post-dom-only", PostDomOnlyPrinter()) FUNCTION_PASS("dse", DSEPass()) FUNCTION_PASS("dwarf-eh-prepare", DwarfEHPreparePass(TM)) +FUNCTION_PASS("expand-atomic", ExpandAtomicPass(TM)) FUNCTION_PASS("expand-large-div-rem", ExpandLargeDivRemPass(TM)) FUNCTION_PASS("expand-large-fp-convert", ExpandLargeFpConvertPass(TM)) FUNCTION_PASS("expand-memcmp", ExpandMemCmpPass(TM)) diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp index 144610e021c58e..449ebb6803fb0d 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp @@ -564,7 +564,7 @@ std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const { void AArch64PassConfig::addIRPasses() { // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg // ourselves. - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); // Expand any SVE vector library calls that we can't code generate directly. 
if (EnableSVEIntrinsicOpts && diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp index 0f3bb3e7b0d8d0..e802c0ad89a698 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp @@ -1054,7 +1054,7 @@ void AMDGPUPassConfig::addIRPasses() { addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy)); } - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); if (TM.getOptLevel() > CodeGenOptLevel::None) { addPass(createAMDGPUPromoteAlloca()); diff --git a/llvm/lib/Target/ARC/ARCTargetMachine.cpp b/llvm/lib/Target/ARC/ARCTargetMachine.cpp index 4f612ae623b986..6f16b328af7efa 100644 --- a/llvm/lib/Target/ARC/ARCTargetMachine.cpp +++ b/llvm/lib/Target/ARC/ARCTargetMachine.cpp @@ -70,7 +70,7 @@ TargetPassConfig *ARCTargetMachine::createPassConfig(PassManagerBase &PM) { } void ARCPassConfig::addIRPasses() { - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); TargetPassConfig::addIRPasses(); } diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp index a99773691df123..a5d32793e6f57e 100644 --- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp +++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp @@ -418,7 +418,7 @@ void ARMPassConfig::addIRPasses() { if (TM->Options.ThreadModel == ThreadModel::Single) addPass(createLowerAtomicPass()); else - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); // Cmpxchg instructions are often used with a subsequent comparison to // determine whether it succeeded. We can exploit existing control-flow in diff --git a/llvm/lib/Target/BPF/BPFTargetMachine.cpp b/llvm/lib/Target/BPF/BPFTargetMachine.cpp index 8a6e7ae3663e0d..d94466c8892eb1 100644 --- a/llvm/lib/Target/BPF/BPFTargetMachine.cpp +++ b/llvm/lib/Target/BPF/BPFTargetMachine.cpp @@ -149,7 +149,7 @@ void BPFTargetMachine::registerPassBuilderCallbacks( } void BPFPassConfig::addIRPasses() { - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); addPass(createBPFCheckAndAdjustIR()); TargetPassConfig::addIRPasses(); diff --git a/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp b/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp index 8c268dc3161413..223e27b49fe3df 100644 --- a/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp +++ b/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp @@ -118,7 +118,7 @@ TargetPassConfig *CSKYTargetMachine::createPassConfig(PassManagerBase &PM) { } void CSKYPassConfig::addIRPasses() { - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); TargetPassConfig::addIRPasses(); } diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp index e7a692d67ba015..786641ce063435 100644 --- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp +++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp @@ -339,7 +339,7 @@ void HexagonPassConfig::addIRPasses() { addPass(createDeadCodeEliminationPass()); } - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); if (!NoOpt) { if (EnableInitialCFGCleanup) diff --git a/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp b/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp index 33479720183b43..0434bf5010e880 100644 --- a/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp +++ b/llvm/lib/Target/Lanai/LanaiTargetMachine.cpp @@ -106,7 +106,7 @@ LanaiTargetMachine::createPassConfig(PassManagerBase &PassManager) { } void LanaiPassConfig::addIRPasses() { - 
addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); TargetPassConfig::addIRPasses(); } diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp index 62ae1dea00d6f8..786b845e32f480 100644 --- a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp @@ -161,7 +161,7 @@ void LoongArchPassConfig::addIRPasses() { // pointer values N iterations ahead. if (TM->getOptLevel() != CodeGenOptLevel::None && EnableLoopDataPrefetch) addPass(createLoopDataPrefetchPass()); - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); TargetPassConfig::addIRPasses(); } diff --git a/llvm/lib/Target/M68k/M68kTargetMachine.cpp b/llvm/lib/Target/M68k/M68kTargetMachine.cpp index af8cb9a83a050e..e8d399e50e6c68 100644 --- a/llvm/lib/Target/M68k/M68kTargetMachine.cpp +++ b/llvm/lib/Target/M68k/M68kTargetMachine.cpp @@ -171,7 +171,7 @@ TargetPassConfig *M68kTargetMachine::createPassConfig(PassManagerBase &PM) { } void M68kPassConfig::addIRPasses() { - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); TargetPassConfig::addIRPasses(); } diff --git a/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp b/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp index 283de46e57d5c4..a8cad1d8aefcb0 100644 --- a/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp +++ b/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp @@ -83,7 +83,7 @@ MachineFunctionInfo *MSP430TargetMachine::createMachineFunctionInfo( } void MSP430PassConfig::addIRPasses() { - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); TargetPassConfig::addIRPasses(); } diff --git a/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/llvm/lib/Target/Mips/MipsTargetMachine.cpp index 07422283692929..5113b51938e7a4 100644 --- a/llvm/lib/Target/Mips/MipsTargetMachine.cpp +++ b/llvm/lib/Target/Mips/MipsTargetMachine.cpp @@ -263,7 +263,7 @@ std::unique_ptr<CSEConfigBase> MipsPassConfig::getCSEConfig() const { void MipsPassConfig::addIRPasses() { TargetPassConfig::addIRPasses(); - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); if (getMipsSubtarget().os16()) addPass(createMipsOs16Pass()); if (getMipsSubtarget().inMips16HardFloat()) diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp index fad69f5e80a7a8..a3a959de2d5f90 100644 --- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp @@ -380,7 +380,7 @@ void NVPTXPassConfig::addIRPasses() { addStraightLineScalarOptimizationPasses(); } - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); addPass(createNVPTXCtorDtorLoweringLegacyPass()); // === LSR and other generic IR passes === diff --git a/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp index aee57a5075ff71..a71a1b8e8439b0 100644 --- a/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp +++ b/llvm/lib/Target/PowerPC/PPCExpandAtomicPseudoInsts.cpp @@ -23,7 +23,7 @@ using namespace llvm; -#define DEBUG_TYPE "ppc-atomic-expand" +#define DEBUG_TYPE "ppc-expand-atomic" namespace { diff --git a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp index d676fa86a10e77..4dc3d193754896 100644 --- a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp +++ b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp @@ -457,7 +457,7 @@ TargetPassConfig 
*PPCTargetMachine::createPassConfig(PassManagerBase &PM) { void PPCPassConfig::addIRPasses() { if (TM->getOptLevel() != CodeGenOptLevel::None) addPass(createPPCBoolRetToIntPass()); - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); // Lower generic MASSV routines to PowerPC subtarget-specific entries. addPass(createPPCLowerMASSVEntriesPass()); diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp index 3abdb6003659fa..27133b8ca95239 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp @@ -425,7 +425,7 @@ bool RISCVPassConfig::addRegAssignAndRewriteOptimized() { } void RISCVPassConfig::addIRPasses() { - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); if (getOptLevel() != CodeGenOptLevel::None) { if (EnableLoopDataPrefetch) diff --git a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp index b408af2ea5943d..ac2d73d99bc3c7 100644 --- a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp +++ b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp @@ -175,7 +175,7 @@ TargetPassConfig *SparcTargetMachine::createPassConfig(PassManagerBase &PM) { } void SparcPassConfig::addIRPasses() { - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); TargetPassConfig::addIRPasses(); } diff --git a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp index 73e01e3ec18442..670e7b0a75fa97 100644 --- a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp +++ b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp @@ -228,7 +228,7 @@ void SystemZPassConfig::addIRPasses() { addPass(createLoopDataPrefetchPass()); } - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); TargetPassConfig::addIRPasses(); } diff --git a/llvm/lib/Target/VE/VETargetMachine.cpp b/llvm/lib/Target/VE/VETargetMachine.cpp index 6d102bfd3926af..c9c6f134b61faa 100644 --- a/llvm/lib/Target/VE/VETargetMachine.cpp +++ b/llvm/lib/Target/VE/VETargetMachine.cpp @@ -134,7 +134,7 @@ TargetPassConfig *VETargetMachine::createPassConfig(PassManagerBase &PM) { void VEPassConfig::addIRPasses() { // VE requires atomic expand pass. 
- addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); TargetPassConfig::addIRPasses(); } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp index 912f61765579f8..100e3a60c5ea3b 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp @@ -45,7 +45,7 @@ WebAssemblySubtarget::WebAssemblySubtarget(const Triple &TT, TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this) {} -bool WebAssemblySubtarget::enableAtomicExpand() const { +bool WebAssemblySubtarget::enableExpandAtomic() const { // If atomics are disabled, atomic ops are lowered instead of expanded return hasAtomics(); } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h index 85d02b087c786e..1f5bb72b027bdd 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h @@ -84,7 +84,7 @@ class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo { return &getInstrInfo()->getRegisterInfo(); } const Triple &getTargetTriple() const { return TargetTriple; } - bool enableAtomicExpand() const override; + bool enableExpandAtomic() const override; bool enableIndirectBrExpand() const override { return true; } bool enableMachineScheduler() const override; bool useAA() const override; diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp index 2db1b6493cc476..77048a336e699e 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp @@ -478,7 +478,7 @@ void WebAssemblyPassConfig::addISelPrepare() { addPass(new CoalesceFeaturesAndStripAtomics(&getWebAssemblyTargetMachine())); // This is a no-op if atomics are not used in the module - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); TargetPassConfig::addISelPrepare(); } diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp index b92bffbe6239bb..70dedfbc4cff14 100644 --- a/llvm/lib/Target/X86/X86TargetMachine.cpp +++ b/llvm/lib/Target/X86/X86TargetMachine.cpp @@ -435,7 +435,7 @@ MachineFunctionInfo *X86TargetMachine::createMachineFunctionInfo( } void X86PassConfig::addIRPasses() { - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); // We add both pass anyway and when these two passes run, we skip the pass // based on the option level and option attribute. 
diff --git a/llvm/lib/Target/XCore/XCoreTargetMachine.cpp b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp index 345a8365ed49b3..c230efda852ecc 100644 --- a/llvm/lib/Target/XCore/XCoreTargetMachine.cpp +++ b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp @@ -84,7 +84,7 @@ TargetPassConfig *XCoreTargetMachine::createPassConfig(PassManagerBase &PM) { } void XCorePassConfig::addIRPasses() { - addPass(createAtomicExpandPass()); + addPass(createExpandAtomicLegacyPass()); TargetPassConfig::addIRPasses(); } diff --git a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll index f45fc22783d1fb..0a0fe156305a97 100644 --- a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll +++ b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940 %s -; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand < %s | FileCheck --check-prefix=OPT %s +; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic < %s | FileCheck --check-prefix=OPT %s define i32 @global_agent_monotonic_idempotent_or(ptr addrspace(1) %in) { ; GFX940-LABEL: global_agent_monotonic_idempotent_or: diff --git a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll index 81ad1604756835..fc88f3f9f5cce3 100644 --- a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll +++ b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -atomic-expand < %s | FileCheck -check-prefix=IR %s +; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -passes=expand-atomic < %s | FileCheck -check-prefix=IR %s ; RUN: llc -mtriple=amdgcn-- -mcpu=tahiti < %s | FileCheck -check-prefix=GCN %s define i32 @load_atomic_private_seq_cst_i32(ptr addrspace(5) %ptr) { diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll index 2fc848a3a810b8..d454e125844639 100644 --- a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll +++ b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -mtriple=aarch64-linux-gnu -atomic-expand %s | FileCheck %s +; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=expand-atomic %s | FileCheck %s define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) { ; CHECK-LABEL: @test_atomicrmw_fadd_f32( diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll index 47d626261bfc43..046324bd9d917f 100644 --- a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll +++ b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -atomic-expand %s | FileCheck %s -; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -mattr=+outline-atomics -atomic-expand %s | FileCheck %s --check-prefix=OUTLINE-ATOMICS +; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -passes=expand-atomic %s | FileCheck %s +; RUN: opt -codegen-opt-level=1 -S -mtriple=aarch64-- -mattr=+outline-atomics -passes=expand-atomic %s | FileCheck %s 
--check-prefix=OUTLINE-ATOMICS define void @atomic_swap_f16(ptr %ptr, half %val) nounwind { ; CHECK-LABEL: @atomic_swap_f16( diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll index 2e9efe911e6d6c..5c6c0ae31d9dd7 100644 --- a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll +++ b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -mtriple=aarch64-linux-gnu -atomic-expand %s | FileCheck %s +; RUN: opt -S -mtriple=aarch64-linux-gnu -passes=expand-atomic %s | FileCheck %s define i8 @atomic8_load_unordered(ptr %a) nounwind uwtable { ; CHECK-LABEL: @atomic8_load_unordered( diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll index b846c1f77538e6..c968c4ec5cc961 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll +++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16-system.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s +; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll index 7f5d6e7cb76f82..3f902ce2d3ef53 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll +++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s -; RUN: opt -mtriple=r600-mesa-mesa3d -S -atomic-expand %s | FileCheck %s +; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s +; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expand-atomic %s | FileCheck %s target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll index f796d3cca3036f..f3982fcd9897df 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll +++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8-system.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s +; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s define i8 @test_atomicrmw_xchg_i8_global_system(ptr addrspace(1) %ptr, i8 %value) { ; CHECK-LABEL: @test_atomicrmw_xchg_i8_global_system( diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll index 6a6e416bdbc89d..9b725ed4cb06c2 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll +++ 
b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s --check-prefixes=CHECK,GCN -; RUN: opt -mtriple=r600-mesa-mesa3d -S -atomic-expand %s | FileCheck %s --check-prefixes=CHECK,R600 +; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s --check-prefixes=CHECK,GCN +; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expand-atomic %s | FileCheck %s --check-prefixes=CHECK,R600 define i8 @test_atomicrmw_xchg_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) { ; GCN-LABEL: @test_atomicrmw_xchg_i8_global_agent( diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll index 5d7825bb378876..e601021cd3ca38 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll +++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -atomic-expand %s | FileCheck -check-prefix=GFX908 %s -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -atomic-expand %s | FileCheck -check-prefix=GFX90A %s -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -atomic-expand %s | FileCheck -check-prefix=GFX940 %s -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -atomic-expand %s | FileCheck -check-prefix=GFX1100 %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=expand-atomic %s | FileCheck -check-prefix=GFX908 %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expand-atomic %s | FileCheck -check-prefix=GFX90A %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=expand-atomic %s | FileCheck -check-prefix=GFX940 %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=expand-atomic %s | FileCheck -check-prefix=GFX1100 %s define float @syncscope_system(ptr %addr, float %val) #0 { ; GFX908-LABEL: @syncscope_system( diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll index 97c041168d147b..6be6771ed920ac 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll +++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=CI %s -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GFX9 %s -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -atomic-expand %s | FileCheck -check-prefix=GFX908 %s -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -atomic-expand %s | FileCheck -check-prefix=GFX90A %s -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -atomic-expand %s | FileCheck -check-prefix=GFX940 %s -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -atomic-expand %s | FileCheck -check-prefix=GFX11 %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=CI %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GFX9 %s +; RUN: opt -S 
-mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=expand-atomic %s | FileCheck -check-prefix=GFX908 %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expand-atomic %s | FileCheck -check-prefix=GFX90A %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -passes=expand-atomic %s | FileCheck -check-prefix=GFX940 %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=expand-atomic %s | FileCheck -check-prefix=GFX11 %s define void @test_atomicrmw_fadd_f32_global_no_use_unsafe(ptr addrspace(1) %ptr, float %value) #0 { ; CI-LABEL: @test_atomicrmw_fadd_f32_global_no_use_unsafe( diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll index 9dfbe9b4eb7413..9df002ea0b2a87 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll +++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmax.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s define float @test_atomicrmw_fmax_f32_flat(ptr %ptr, float %value) { ; GCN-LABEL: @test_atomicrmw_fmax_f32_flat( diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll index 5a732653b48b14..78cf7d0769ce39 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll +++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fmin.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s define float @test_atomicrmw_fmin_f32_flat(ptr %ptr, float %value) { ; GCN-LABEL: @test_atomicrmw_fmin_f32_flat( diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll index 9805c317b9215e..5d4a219a8fe5fa 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll +++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s | FileCheck -check-prefix=GCN %s define float @test_atomicrmw_fsub_f32_flat(ptr %ptr, float %value) { ; GCN-LABEL: @test_atomicrmw_fsub_f32_flat( diff 
--git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll index 5fa9dcc4ad9bf0..e1b0ea7d0f5bcf 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll +++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-nand.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s -; RUN: opt -mtriple=r600-mesa-mesa3d -S -atomic-expand %s | FileCheck %s +; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=expand-atomic %s | FileCheck %s +; RUN: opt -mtriple=r600-mesa-mesa3d -S -passes=expand-atomic %s | FileCheck %s define i32 @test_atomicrmw_nand_i32_flat(ptr %ptr, i32 %value) { ; CHECK-LABEL: @test_atomicrmw_nand_i32_flat( diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll index aceb897a7d487d..2fc671b2624227 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll +++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-simplify-cfg-CAS-block.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -atomic-expand %s | FileCheck -check-prefix=GFX90A %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=expand-atomic %s | FileCheck -check-prefix=GFX90A %s declare i32 @llvm.amdgcn.workitem.id.x() diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll index 6c84474edc05bb..bda3cc4c57a88e 100644 --- a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll +++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s 2>&1 | FileCheck --check-prefix=GCN %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=expand-atomic %s 2>&1 | FileCheck --check-prefix=GCN %s define i32 @atomic_load_global_align1(ptr addrspace(1) %ptr) { ; GCN-LABEL: @atomic_load_global_align1( diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll index 353aafb9727a5b..786aad24c4d348 100644 --- a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll +++ b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v7.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-expand -codegen-opt-level=1 %s | FileCheck %s +; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -passes=expand-atomic -codegen-opt-level=1 %s | FileCheck %s define i8 @test_atomic_xchg_i8(ptr %ptr, i8 %xchgend) { ; CHECK-LABEL: @test_atomic_xchg_i8 diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll index bad28b2b6824e5..a8a212aa2cb36d 100644 --- a/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll +++ b/llvm/test/Transforms/AtomicExpand/ARM/atomic-expansion-v8.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -atomic-expand %s -codegen-opt-level=1 | FileCheck %s +; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -passes=expand-atomic %s -codegen-opt-level=1 | FileCheck %s define i8 @test_atomic_xchg_i8(ptr %ptr, i8 %xchgend) { ; CHECK-LABEL: @test_atomic_xchg_i8 diff --git 
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
index d0268bf3e00796..50336e82719bb3 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=armv7-apple-ios7.0 -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=armv7-apple-ios7.0 -passes=expand-atomic %s | FileCheck %s

 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll b/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
index f7a210d631bf95..56583ce6aed46f 100644
--- a/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
+++ b/llvm/test/Transforms/AtomicExpand/ARM/cmpxchg-weak.ll
@@ -1,4 +1,4 @@
-; RUN: opt -atomic-expand -codegen-opt-level=1 -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
+; RUN: opt -passes=expand-atomic -codegen-opt-level=1 -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s

 define i32 @test_cmpxchg_seq_cst(ptr %addr, i32 %desired, i32 %new) {
 ; CHECK-LABEL: @test_cmpxchg_seq_cst
diff --git a/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
index 8827eb5d8e1088..4d759ecbc7c7a6 100644
--- a/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=hexagon-- -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=hexagon-- -passes=expand-atomic %s | FileCheck %s

 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
index 43fdd25e257b82..18977862cfca06 100644
--- a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --mtriple=loongarch64 --atomic-expand --mattr=+d %s | FileCheck %s
+; RUN: opt -S --mtriple=loongarch64 -passes=expand-atomic --mattr=+d %s | FileCheck %s

 define float @atomicrmw_fadd_float(ptr %ptr, float %value) {
 ; CHECK-LABEL: @atomicrmw_fadd_float(
diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
index b0875669bc3a21..6bb42d17bd5b95 100644
--- a/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --mtriple=loongarch32 --atomic-expand %s | FileCheck %s --check-prefix=LA32
-; RUN: opt -S --mtriple=loongarch64 --atomic-expand %s | FileCheck %s --check-prefix=LA64
+; RUN: opt -S --mtriple=loongarch32 -passes=expand-atomic %s | FileCheck %s --check-prefix=LA32
+; RUN: opt -S --mtriple=loongarch64 -passes=expand-atomic %s | FileCheck %s --check-prefix=LA64

 define i8 @load_acquire_i8(ptr %ptr) {
 ; LA32-LABEL: @load_acquire_i8(
diff --git a/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
index 2c90a70bd0ad05..acc7e2e9869a95 100644
--- a/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=mips64-mti-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=mips64-mti-linux-gnu -passes=expand-atomic %s | FileCheck %s

 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
index 7e42735feabfff..d8ab8d9b987ef3 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=powerpc64-unknown-unknown -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=powerpc64-unknown-unknown -passes=expand-atomic %s | FileCheck %s

 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
index 19e5f56821d746..90ed8128e8898f 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-double.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -atomic-expand -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64le-unknown-unknown \
 ; RUN: < %s 2>&1 | FileCheck %s
-; RUN: opt -S -atomic-expand -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64-unknown-unknown \
 ; RUN: < %s 2>&1 | FileCheck %s

 define double @foo(ptr %dp) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
index 62f0db00df800b..b5be91fca077ff 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cfence-float.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -atomic-expand -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64le-unknown-unknown \
 ; RUN: < %s 2>&1 | FileCheck %s
-; RUN: opt -S -atomic-expand -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -S -passes=expand-atomic -mtriple=powerpc64-unknown-unknown \
 ; RUN: < %s 2>&1 | FileCheck %s

 define float @bar(ptr %fp) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
index 169d73cc0308d3..19a16a43f0be81 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -atomic-expand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64-unknown-unknown \
 ; RUN: -mcpu=pwr8 %s | FileCheck %s
-; RUN: opt -atomic-expand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64-unknown-unknown \
 ; RUN: -mcpu=pwr7 %s | FileCheck --check-prefix=PWR7 %s

 define i1 @test_cmpxchg_seq_cst(ptr %addr, i128 %desire, i128 %new) {
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
index 342506301d0046..b6fe0febf1e553 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/issue55983.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -atomic-expand -S -mtriple=powerpc64le-unknown-unknown \
+; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64le-unknown-unknown \
 ; RUN: %s | FileCheck %s
-; RUN: opt -atomic-expand -S -mtriple=powerpc64-unknown-unknown \
+; RUN: opt -passes=expand-atomic -S -mtriple=powerpc64-unknown-unknown \
 ; RUN: %s | FileCheck %s

 define ptr @foo(ptr %p) {
diff --git a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
index ceaafd89990b05..20d0347579f305 100644
--- a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=riscv32-- -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=riscv32-- -passes=expand-atomic %s | FileCheck %s

 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
index 4427c5e7ed23dc..7557b722d6949a 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -atomic-expand | FileCheck %s
+; RUN: opt -S %s -passes=expand-atomic | FileCheck %s

 ;;; NOTE: this test is actually target-independent -- any target which
 ;;; doesn't support inline atomics can be used. (E.g. X86 i386 would
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
index 5bcb21105df8bb..9e238594725394 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S %s -atomic-expand | FileCheck %s
+; RUN: opt -S %s -passes=expand-atomic | FileCheck %s

 ;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
 ;; instructions are not available.
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
index 8d71966c04d039..9af61056137bea 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=expand-atomic %s | FileCheck %s

 define i256 @atomic_load256_libcall(ptr %ptr) nounwind {
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
index dab7677086e91c..6105857dcb7a80 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -atomic-expand -mtriple=x86_64-linux-gnu | FileCheck %s
+; RUN: opt -S %s -passes=expand-atomic -mtriple=x86_64-linux-gnu | FileCheck %s

 ; This file tests the functions `llvm::convertAtomicLoadToIntegerType` and
 ; `llvm::convertAtomicStoreToIntegerType`. If X86 stops using this
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
index 69837b96a90d00..3d561d54b1146f 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=expand-atomic %s | FileCheck %s

 define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-LABEL: @test_atomicrmw_fadd_f32(
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
index fba1512368ea27..592b2e8f933dc0 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-initial-load.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %s -atomic-expand -mtriple=i686-linux-gnu | FileCheck %s
+; RUN: opt -S %s -passes=expand-atomic -mtriple=i686-linux-gnu | FileCheck %s

 ; This file tests the function `llvm::expandAtomicRMWToCmpXchg`.
 ; It isn't technically target specific, but is exposed through a pass that is.
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
index 2464af3336ef3f..ba43255b418afb 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+; RUN: opt -S -mtriple=i686-linux-gnu -passes=expand-atomic %s | FileCheck %s

 define double @atomic_xchg_f64(ptr %ptr) nounwind {
 ; CHECK-LABEL: @atomic_xchg_f64(
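All of the test updates above make the same mechanical change: the expansion pass is now requested through the new pass manager pipeline string (-passes=expand-atomic) rather than the legacy -atomic-expand flag. As a rough illustration of what that pipeline string amounts to, the C++ sketch below runs the ported pass over a module; the ExpandAtomicPass name, the header path, and the TargetMachine constructor argument are assumptions inferred from the declarations this series touches, not code quoted from it.

  // Sketch only: run the ported pass over a module, roughly what
  // `opt -passes=expand-atomic` does for each of the tests above.
  #include "llvm/Analysis/CGSCCPassManager.h"
  #include "llvm/Analysis/LoopAnalysisManager.h"
  #include "llvm/CodeGen/ExpandAtomic.h" // assumed new header from this series
  #include "llvm/IR/Module.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Passes/PassBuilder.h"
  #include "llvm/Target/TargetMachine.h"

  using namespace llvm;

  static void runExpandAtomic(Module &M, TargetMachine *TM) {
    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;

    // Register the standard analyses so the function pass can query them.
    PassBuilder PB(TM);
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

    FunctionPassManager FPM;
    FPM.addPass(ExpandAtomicPass(TM)); // constructor shape is an assumption
    for (Function &F : M)
      if (!F.isDeclaration())
        FPM.run(F, FAM);
  }

In-tree code never needs this boilerplate; the string name in the pipeline is enough. The remaining hunks of this patch update the opt driver and the GN build accordingly.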
diff --git a/llvm/tools/opt/opt.cpp b/llvm/tools/opt/opt.cpp
index c649e6ecddc080..6ed5a71d93d12a 100644
--- a/llvm/tools/opt/opt.cpp
+++ b/llvm/tools/opt/opt.cpp
@@ -347,7 +347,6 @@ static bool shouldPinPassToLegacyPM(StringRef Pass) {
       "interleaved-load-combine",
       "unreachableblockelim",
       "verify-safepoint-ir",
-      "atomic-expand",
       "expandvp",
       "mve-tail-predication",
       "interleaved-access",
@@ -427,7 +426,6 @@ int main(int argc, char **argv) {
   initializeSelectOptimizePass(Registry);
   initializeCallBrPreparePass(Registry);
   initializeCodeGenPrepareLegacyPassPass(Registry);
-  initializeAtomicExpandPass(Registry);
   initializeWinEHPreparePass(Registry);
   initializeDwarfEHPrepareLegacyPassPass(Registry);
   initializeSafeStackLegacyPassPass(Registry);
diff --git a/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
index 047f6583ec4e88..1d5af1bf24dd18 100644
--- a/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
@@ -39,7 +39,6 @@ static_library("CodeGen") {
     "AllocationOrder.cpp",
     "Analysis.cpp",
     "AssignmentTrackingAnalysis.cpp",
-    "AtomicExpandPass.cpp",
     "BasicBlockPathCloning.cpp",
     "BasicBlockSections.cpp",
    "BasicBlockSectionsProfileReader.cpp",
@@ -69,6 +68,7 @@ static_library("CodeGen") {
     "EarlyIfConversion.cpp",
     "EdgeBundles.cpp",
     "ExecutionDomainFix.cpp",
+    "ExpandAtomicPass.cpp",
    "ExpandLargeDivRem.cpp",
     "ExpandLargeFpConvert.cpp",
     "ExpandMemCmp.cpp",

>From 68ae55b820134067d22cce8afe9bfacb25b87aba Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsb...@gmail.com>
Date: Sun, 14 Jan 2024 22:58:34 +0530
Subject: [PATCH 2/4] Minor change

---
 llvm/include/llvm/InitializePasses.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index efcfa080912b82..4c45f420252538 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -100,7 +100,7 @@ void initializeEarlyMachineLICMPass(PassRegistry&);
 void initializeEarlyTailDuplicatePass(PassRegistry&);
 void initializeEdgeBundlesPass(PassRegistry&);
 void initializeEHContGuardCatchretPass(PassRegistry &);
-void initializeExpandAtomicPass(PassRegistry&);
+void initializeExpandAtomicLegacyPass(PassRegistry&);
 void initializeExpandLargeFpConvertLegacyPassPass(PassRegistry&);
 void initializeExpandLargeDivRemLegacyPassPass(PassRegistry&);
 void initializeExpandMemCmpLegacyPassPass(PassRegistry &);
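Patch 2 only renames the declaration of the legacy pass-manager initializer. For orientation, the kind of boilerplate such a declaration usually pairs with in the implementation file is sketched below; the class name, the registration strings, and the empty runOnFunction body are illustrative placeholders rather than code from this series.

  // Illustrative legacy-PM wrapper matching a declaration such as
  // initializeExpandAtomicLegacyPass(PassRegistry &). Placeholder only; the
  // real wrapper lives in llvm/lib/CodeGen/ExpandAtomicPass.cpp.
  #include "llvm/CodeGen/Passes.h"
  #include "llvm/IR/Function.h"
  #include "llvm/InitializePasses.h"
  #include "llvm/Pass.h"
  #include "llvm/PassRegistry.h"

  using namespace llvm;

  namespace {
  class ExpandAtomicLegacy : public FunctionPass {
  public:
    static char ID; // Pass identification.

    ExpandAtomicLegacy() : FunctionPass(ID) {
      initializeExpandAtomicLegacyPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override {
      return false; // Placeholder: the real atomic lowering goes here.
    }
  };
  } // end anonymous namespace

  char ExpandAtomicLegacy::ID = 0;

  // INITIALIZE_PASS expands to the definition of
  // initializeExpandAtomicLegacyPass(), which is why the declaration in
  // InitializePasses.h is renamed together with the wrapper class.
  INITIALIZE_PASS(ExpandAtomicLegacy, "expand-atomic-legacy",
                  "Expand atomic instructions (legacy)", false, false)

  FunctionPass *llvm::createExpandAtomicLegacyPass() {
    return new ExpandAtomicLegacy();
  }

Patch 3 below then re-points the public factory function at this renamed wrapper.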
>From 7bcf6cb98290819c764182b034ec9909cc3294c3 Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsb...@gmail.com>
Date: Mon, 15 Jan 2024 00:02:52 +0530
Subject: [PATCH 3/4] Port expand atomic pass to new PM

---
 llvm/include/llvm/CodeGen/Passes.h | 2 +-
 llvm/include/llvm/LinkAllPasses.h  | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index 4f63c32d9fd2eb..4642adbc5039ef 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -44,7 +44,7 @@ namespace llvm {
   /// ExpandAtomicPass - At IR level this pass replace atomic instructions with
   /// __atomic_* library calls, or target specific instruction which implement the
   /// same semantics in a way which better fits the target backend.
-  FunctionPass *createExpandAtomicPass();
+  FunctionPass *createExpandAtomicLegacyPass();

   /// createUnreachableBlockEliminationPass - The LLVM code generator does not
   /// work well with unreachable basic blocks (what live ranges make sense for a
diff --git a/llvm/include/llvm/LinkAllPasses.h b/llvm/include/llvm/LinkAllPasses.h
index fe7fedad18bc0e..bb49055ef19c38 100644
--- a/llvm/include/llvm/LinkAllPasses.h
+++ b/llvm/include/llvm/LinkAllPasses.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_LINKALLPASSES_H
 #define LLVM_LINKALLPASSES_H

+#include "CodeGen/Passes.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysisEvaluator.h"
 #include "llvm/Analysis/AliasSetTracker.h"
@@ -118,6 +119,7 @@ namespace {
       (void) llvm::createGVNPass();
       (void) llvm::createPostDomTree();
       (void) llvm::createMergeICmpsLegacyPass();
+      (void) llvm::createExpandAtomicLegacyPass();
      (void) llvm::createExpandLargeDivRemPass();
      (void)llvm::createExpandMemCmpLegacyPass();
      (void) llvm::createExpandVectorPredicationPass();
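Patch 3 renames the factory for the legacy wrapper; the new-PM entry point itself is declared in the new header llvm/include/llvm/CodeGen/ExpandAtomic.h, which is not reproduced in this excerpt. A typical shape for such a declaration is sketched below, with the class name taken from the comment in Passes.h above and the TargetMachine parameter assumed by analogy with other CodeGen passes that have been ported.

  // Assumed sketch of the new-PM declaration; not quoted from
  // llvm/include/llvm/CodeGen/ExpandAtomic.h.
  #ifndef LLVM_CODEGEN_EXPANDATOMIC_H
  #define LLVM_CODEGEN_EXPANDATOMIC_H

  #include "llvm/IR/PassManager.h"

  namespace llvm {

  class Function;
  class TargetMachine;

  class ExpandAtomicPass : public PassInfoMixin<ExpandAtomicPass> {
    const TargetMachine *TM;

  public:
    // The TargetMachine argument is an assumption: the expansion decisions
    // come from TargetLowering, so the pass needs a path to the target.
    explicit ExpandAtomicPass(const TargetMachine *TM) : TM(TM) {}

    PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  };

  } // end namespace llvm

  #endif // LLVM_CODEGEN_EXPANDATOMIC_H

With a declaration along these lines, PassRegistry.def can map the string "expand-atomic" to the class, which is what lets the -passes=expand-atomic RUN lines resolve and gives the remark name used by the tests in the next patch.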
>From bae1090a41dc4606d912c063f484c2fb5e311bac Mon Sep 17 00:00:00 2001
From: Rishabh Bali <rishabhsb...@gmail.com>
Date: Mon, 15 Jan 2024 13:16:29 +0530
Subject: [PATCH 4/4] Tests failures

---
 clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu | 2 +-
 .../test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl | 12 ++++++------
 .../CodeGen/AArch64/partial-pipeline-execution.ll | 4 ++--
 .../CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll | 2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu b/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
index 946927d88a1ee1..6ac9c7bdef639b 100644
--- a/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
+++ b/clang/test/CodeGenCUDA/atomics-remarks-gfx90a.cu
@@ -1,5 +1,5 @@
 // RUN: %clang_cc1 %s -triple=amdgcn-amd-amdhsa -fcuda-is-device \
-// RUN: -target-cpu gfx90a -Rpass=atomic-expand -S -o - 2>&1 | \
+// RUN: -target-cpu gfx90a -Rpass=expand-atomic -S -o - 2>&1 | \
 // RUN: FileCheck %s --check-prefix=GFX90A-CAS

 // REQUIRES: amdgpu-registered-target
diff --git a/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl b/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl
index a5321ea7c158da..5d5464be04a090 100644
--- a/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl
+++ b/clang/test/CodeGenOpenCL/atomics-cas-remarks-gfx90a.cl
@@ -1,9 +1,9 @@
 // RUN: %clang_cc1 %s -cl-std=CL2.0 -O0 -triple=amdgcn-amd-amdhsa -target-cpu gfx90a \
-// RUN: -Rpass=atomic-expand -S -o - 2>&1 | \
+// RUN: -Rpass=expand-atomic -S -o - 2>&1 | \
 // RUN: FileCheck %s --check-prefix=REMARK

 // RUN: %clang_cc1 %s -cl-std=CL2.0 -O0 -triple=amdgcn-amd-amdhsa -target-cpu gfx90a \
-// RUN: -Rpass=atomic-expand -S -emit-llvm -o - 2>&1 | \
+// RUN: -Rpass=expand-atomic -S -emit-llvm -o - 2>&1 | \
 // RUN: FileCheck %s --check-prefix=GFX90A-CAS

 // REQUIRES: amdgpu-registered-target
@@ -26,10 +26,10 @@ typedef enum memory_scope {
 #endif
 } memory_scope;

-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at workgroup-one-as memory scope [-Rpass=atomic-expand]
-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at agent-one-as memory scope [-Rpass=atomic-expand]
-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at one-as memory scope [-Rpass=atomic-expand]
-// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at wavefront-one-as memory scope [-Rpass=atomic-expand]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at workgroup-one-as memory scope [-Rpass=expand-atomic]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at agent-one-as memory scope [-Rpass=expand-atomic]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at one-as memory scope [-Rpass=expand-atomic]
+// REMARK: remark: A compare and swap loop was generated for an atomic fadd operation at wavefront-one-as memory scope [-Rpass=expand-atomic]
 // GFX90A-CAS-LABEL: @atomic_cas
 // GFX90A-CAS: atomicrmw fadd ptr addrspace(1) {{.*}} syncscope("workgroup-one-as") monotonic
 // GFX90A-CAS: atomicrmw fadd ptr addrspace(1) {{.*}} syncscope("agent-one-as") monotonic
diff --git a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
index c2ef2fa16a9a28..fc071e29c9467f 100644
--- a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
+++ b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -O3 %s -o %t.s
-; RUN: llc -O3 -stop-after=atomic-expand %s -o %t.mir
-; RUN: llc -O3 -start-after=atomic-expand %s -o %t2.s
+; RUN: llc -O3 -stop-after=expand-atomic %s -o %t.mir
+; RUN: llc -O3 -start-after=expand-atomic %s -o %t2.s

 ; If we add tti pass correctly files should be identical
 ; Otherwise LSR will use default TargetTransformInfo and
diff --git a/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll
index 2f7d1e9a6efafd..fcb32f10493d42 100644
--- a/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomics-cas-remarks-gfx90a.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs --pass-remarks=atomic-expand \
+; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs --pass-remarks=expand-atomic \
 ; RUN: %s -o - 2>&1 | FileCheck %s --check-prefix=GFX90A-CAS

 ; GFX90A-CAS: A compare and swap loop was generated for an atomic fadd operation at system memory scope

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits