Author: Matt Arsenault
Date: 2022-08-04T19:02:56-04:00
New Revision: c5b36ab1d6a667554bf369c34e51d02add039d16

URL: https://github.com/llvm/llvm-project/commit/c5b36ab1d6a667554bf369c34e51d02add039d16
DIFF: https://github.com/llvm/llvm-project/commit/c5b36ab1d6a667554bf369c34e51d02add039d16.diff

LOG: AMDGPU/clang: Remove dead code

The order has to be a constant and should be enforced by the builtin
definition. The fallthrough behavior would have been broken anyway.
There's still an existing issue/assert if you try to use garbage for the
ordering. The IRGen should be broken, but we also hit another assert
before that.

Fixes issue 56832
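For illustration (an editorial sketch, not part of the commit): the
constraint being relied on here is that the ordering operand of
__builtin_amdgcn_fence is an integer constant expression and the scope is a
string literal. A typical well-formed call looks like the following; the
function name is invented:

    // Constant C ABI ordering (__ATOMIC_*) plus a string-literal AMDGCN
    // sync scope -- the only form the builtin definition accepts.
    void emit_release_fence(void) {
      __builtin_amdgcn_fence(__ATOMIC_RELEASE, "workgroup");
    }
    // Lowers to roughly: fence syncscope("workgroup") release

Since a non-constant ordering can no longer reach CodeGen,
ProcessOrderScopeAMDGCN may assume a ConstantInt and no longer needs a
boolean result.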
Added: 
    

Modified: 
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/lib/CodeGen/CodeGenFunction.h

Removed: 
    

################################################################################

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index f1c7c5156f3d3..04c68fa18cf68 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -16504,39 +16504,35 @@ Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
 // it into LLVM's memory ordering specifier using atomic C ABI, and writes
 // to \p AO. \p Scope takes a const char * and converts it into AMDGCN
 // specific SyncScopeID and writes it to \p SSID.
-bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
+void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
                                               llvm::AtomicOrdering &AO,
                                               llvm::SyncScope::ID &SSID) {
-  if (isa<llvm::ConstantInt>(Order)) {
-    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
-
-    // Map C11/C++11 memory ordering to LLVM memory ordering
-    assert(llvm::isValidAtomicOrderingCABI(ord));
-    switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
-    case llvm::AtomicOrderingCABI::acquire:
-    case llvm::AtomicOrderingCABI::consume:
-      AO = llvm::AtomicOrdering::Acquire;
-      break;
-    case llvm::AtomicOrderingCABI::release:
-      AO = llvm::AtomicOrdering::Release;
-      break;
-    case llvm::AtomicOrderingCABI::acq_rel:
-      AO = llvm::AtomicOrdering::AcquireRelease;
-      break;
-    case llvm::AtomicOrderingCABI::seq_cst:
-      AO = llvm::AtomicOrdering::SequentiallyConsistent;
-      break;
-    case llvm::AtomicOrderingCABI::relaxed:
-      AO = llvm::AtomicOrdering::Monotonic;
-      break;
-    }
-
-    StringRef scp;
-    llvm::getConstantStringInfo(Scope, scp);
-    SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
-    return true;
+  int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+
+  // Map C11/C++11 memory ordering to LLVM memory ordering
+  assert(llvm::isValidAtomicOrderingCABI(ord));
+  switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
+  case llvm::AtomicOrderingCABI::acquire:
+  case llvm::AtomicOrderingCABI::consume:
+    AO = llvm::AtomicOrdering::Acquire;
+    break;
+  case llvm::AtomicOrderingCABI::release:
+    AO = llvm::AtomicOrdering::Release;
+    break;
+  case llvm::AtomicOrderingCABI::acq_rel:
+    AO = llvm::AtomicOrdering::AcquireRelease;
+    break;
+  case llvm::AtomicOrderingCABI::seq_cst:
+    AO = llvm::AtomicOrdering::SequentiallyConsistent;
+    break;
+  case llvm::AtomicOrderingCABI::relaxed:
+    AO = llvm::AtomicOrdering::Monotonic;
+    break;
   }
-  return false;
+
+  StringRef scp;
+  llvm::getConstantStringInfo(Scope, scp);
+  SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
 }
 
 Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
@@ -16966,12 +16962,10 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
     return Builder.CreateCall(F, { Src0, Src1, Src2 });
   }
   case AMDGPU::BI__builtin_amdgcn_fence: {
-    if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
-                                EmitScalarExpr(E->getArg(1)), AO, SSID))
-      return Builder.CreateFence(AO, SSID);
-    LLVM_FALLTHROUGH;
+    ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
+                            EmitScalarExpr(E->getArg(1)), AO, SSID);
+    return Builder.CreateFence(AO, SSID);
   }
   case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
   case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
@@ -16997,22 +16991,20 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     llvm::Function *F =
         CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
 
-    if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
-                                EmitScalarExpr(E->getArg(3)), AO, SSID)) {
+    ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
+                            EmitScalarExpr(E->getArg(3)), AO, SSID);
 
-      // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expects ordering and
-      // scope as unsigned values
-      Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
-      Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
+    // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expects ordering and
+    // scope as unsigned values
+    Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
+    Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
 
-      QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
-      bool Volatile =
-          PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
-      Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
+    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+    bool Volatile =
+        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+    Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
 
-      return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
-    }
-    LLVM_FALLTHROUGH;
+    return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
   }
   default:
     return nullptr;
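A similar editorial sketch (identifiers invented) for a call that reaches
the inc/dec path in the CGBuiltin.cpp hunk above; the last two arguments
are the same constant ordering and string-literal scope as for the fence
builtin:

    // Wrapping atomic increment: atomically computes
    // old >= 42 ? 0 : old + 1, returning the old value.
    unsigned bump(unsigned *counter) {
      return __builtin_amdgcn_atomic_inc32(counter, 42u, __ATOMIC_SEQ_CST,
                                           "agent");
    }

With the dead fallthrough gone, CodeGen unconditionally emits the call to
the llvm.amdgcn.atomic.inc intrinsic, passing the ordering, sync scope, and
volatility as constant operands.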
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 30cf162e36206..93d4263082070 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -4248,7 +4248,7 @@ class CodeGenFunction : public CodeGenTypeCache {
   llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
   llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                     ReturnValueSlot ReturnValue);
-  bool ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
+  void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
                                llvm::AtomicOrdering &AO,
                                llvm::SyncScope::ID &SSID);


_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits