Author: Florian Hahn
Date: 2021-01-22T15:10:12Z
New Revision: 86991d3231334538cccf6732056cbb641046bd49
URL: https://github.com/llvm/llvm-project/commit/86991d3231334538cccf6732056cbb641046bd49
DIFF: https://github.com/llvm/llvm-project/commit/86991d3231334538cccf6732056cbb641046bd49.diff

LOG: [LoopUnswitch] Fix logic to avoid unswitching with atomic loads.

The existing code did not deal with atomic loads correctly. Such loads are
represented as MemoryDefs. Bail out on any MemoryAccess that is not a
MemoryUse.

Added: 


Modified: 
    llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
    llvm/test/Transforms/LoopUnswitch/partial-unswitch.ll

Removed: 


################################################################################
diff --git a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 2e3ab5029fd2..ecf2756bc720 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -681,16 +681,22 @@ hasPartialIVCondition(Loop *L, MemorySSA &MSSA, AAResults *AA) {
       if (!isa<LoadInst>(I) && !isa<GetElementPtrInst>(I))
         return {};
 
-      // Do not duplicate volatile loads.
+      // Do not duplicate volatile and atomic loads.
       if (auto *LI = dyn_cast<LoadInst>(I))
-        if (LI->isVolatile())
+        if (LI->isVolatile() || LI->isAtomic())
           return {};
 
       ToDuplicate.push_back(I);
-      if (auto *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(I))) {
-        // Queue the defining access to check for alias checks.
-        AccessesToCheck.push_back(MemUse->getDefiningAccess());
-        AccessedLocs.push_back(MemoryLocation::get(I));
+      if (MemoryAccess *MA = MSSA.getMemoryAccess(I)) {
+        if (auto *MemUse = dyn_cast_or_null<MemoryUse>(MA)) {
+          // Queue the defining access to check for alias checks.
+          AccessesToCheck.push_back(MemUse->getDefiningAccess());
+          AccessedLocs.push_back(MemoryLocation::get(I));
+        } else {
+          // MemoryDefs may clobber the location or may be atomic memory
+          // operations. Bail out.
+          return {};
+        }
       }
       WorkList.append(I->op_begin(), I->op_end());
     }
@@ -972,6 +978,8 @@ bool LoopUnswitch::processCurrentLoop() {
       !findOptionMDForLoop(CurrentLoop, "llvm.loop.unswitch.partial.disable")) {
     auto ToDuplicate = hasPartialIVCondition(CurrentLoop, *MSSA, AA);
     if (!ToDuplicate.first.empty()) {
+      LLVM_DEBUG(dbgs() << "loop-unswitch: Found partially invariant condition "
+                        << *ToDuplicate.first[0] << "\n");
       ++NumBranches;
       unswitchIfProfitable(ToDuplicate.first[0], ToDuplicate.second,
                            CurrentLoop->getHeader()->getTerminator(),

diff --git a/llvm/test/Transforms/LoopUnswitch/partial-unswitch.ll b/llvm/test/Transforms/LoopUnswitch/partial-unswitch.ll
index 3e4c9369997e..1c2471cef9ba 100644
--- a/llvm/test/Transforms/LoopUnswitch/partial-unswitch.ll
+++ b/llvm/test/Transforms/LoopUnswitch/partial-unswitch.ll
@@ -731,11 +731,10 @@ exit:
 
 ; Do not unswitch if the condition depends on an atomic load. Duplicating such
 ; loads is not safe.
-; TODO
 define i32 @no_partial_unswitch_atomic_load_unordered(i32* %ptr, i32 %N) {
 ; CHECK-LABEL: @no_partial_unswitch_atomic_load_unordered
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   load
+; CHECK-NEXT:   br label %loop.header
 ;
 entry:
   br label %loop.header
@@ -764,11 +763,10 @@ exit:
 
 ; Do not unswitch if the condition depends on an atomic load. Duplicating such
 ; loads is not safe.
-; TODO
 define i32 @no_partial_unswitch_atomic_load_monotonic(i32* %ptr, i32 %N) {
 ; CHECK-LABEL: @no_partial_unswitch_atomic_load_monotonic
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   load
+; CHECK-NEXT:   br label %loop.header
 ;
 entry:
   br label %loop.header
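
A note on the MemorySSA side of this (not part of the commit, just a reduced
sketch): an atomic load with monotonic or stronger ordering is, as far as I can
tell, modeled as a MemoryDef rather than a MemoryUse, so the old
dyn_cast_or_null<MemoryUse> check returned null, no location was queued for the
alias checks, and the load could still be duplicated. The function name below
is hypothetical; the annotation comment shows roughly what
opt -passes='print<memoryssa>' -disable-output would print for such a load:

define i32 @atomic_load_is_a_memorydef(i32* %ptr) {
entry:
; 1 = MemoryDef(liveOnEntry)
  %lv = load atomic i32, i32* %ptr monotonic, align 4
  ret i32 %lv
}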