llvmbot wrote:

<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-clangir

Author: Sirui Mu (Lancern)

<details>
<summary>Changes</summary>

This patch adds support for atomic loads and stores. Specifically, it adds
codegen support for calls to the following builtins:

- `__atomic_load` and `__atomic_store`;
- `__c11_atomic_load` and `__c11_atomic_store`.
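
For reference, a minimal C sketch of the four builtins this patch handles (illustrative only; this is not taken from the patch's `atomic.c` test):

```c
_Atomic int ai;
int plain;

void demo(void) {
  // GNU-style generic builtins: values are passed through pointers,
  // with the memory order as the last argument.
  int tmp = 0;
  __atomic_load(&plain, &tmp, __ATOMIC_ACQUIRE);
  __atomic_store(&plain, &tmp, __ATOMIC_RELEASE);

  // C11-style builtins: the pointee must be _Atomic-qualified, and the
  // value is passed/returned directly.
  int v = __c11_atomic_load(&ai, __ATOMIC_SEQ_CST);
  __c11_atomic_store(&ai, v + 1, __ATOMIC_SEQ_CST);
}
```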

---

Patch is 27.26 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/153814.diff


10 Files Affected:

- (modified) clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h (+6-3) 
- (modified) clang/include/clang/CIR/Dialect/IR/CIROps.td (+20-3) 
- (modified) clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h (+12) 
- (modified) clang/include/clang/CIR/MissingFeatures.h (+2-1) 
- (modified) clang/lib/CIR/CodeGen/Address.h (+6) 
- (modified) clang/lib/CIR/CodeGen/CIRGenAtomic.cpp (+229-3) 
- (modified) clang/lib/CIR/CodeGen/CIRGenBuilder.h (+8-4) 
- (modified) clang/lib/CIR/CodeGen/CIRGenFunction.h (+1-1) 
- (modified) clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp (+31-11) 
- (modified) clang/test/CIR/CodeGen/atomic.c (+106) 


``````````diff
diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 986c8c3d133ac..b686f12521c6e 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -230,8 +230,10 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
   }
 
   cir::StoreOp createStore(mlir::Location loc, mlir::Value val, mlir::Value dst,
-                           mlir::IntegerAttr align = {}) {
-    return create<cir::StoreOp>(loc, val, dst, align);
+                           bool isVolatile = false,
+                           mlir::IntegerAttr align = {},
+                           cir::MemOrderAttr order = {}) {
+    return cir::StoreOp::create(*this, loc, val, dst, align, order);
   }
 
   [[nodiscard]] cir::GlobalOp createGlobal(mlir::ModuleOp mlirModule,
@@ -254,7 +256,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
                                clang::CharUnits alignment) {
     mlir::IntegerAttr alignmentAttr = getAlignmentAttr(alignment);
     auto addr = createAlloca(loc, getPointerTo(type), type, {}, alignmentAttr);
-    return create<cir::LoadOp>(loc, addr, /*isDeref=*/false, alignmentAttr);
+    return cir::LoadOp::create(*this, loc, addr, /*isDeref=*/false,
+                               alignmentAttr, /*mem_order=*/{});
   }
 
   cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base,
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index b64fd2734a63c..c231c44b4b35a 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -299,6 +299,20 @@ def CIR_ConstantOp : CIR_Op<"const", [
   let hasFolder = 1;
 }
 
+//===----------------------------------------------------------------------===//
+// C/C++ memory order definitions
+//===----------------------------------------------------------------------===//
+
+def CIR_MemOrder : CIR_I32EnumAttr<
+  "MemOrder", "Memory order according to C++11 memory model", [
+    I32EnumAttrCase<"Relaxed", 0, "relaxed">,
+    I32EnumAttrCase<"Consume", 1, "consume">,
+    I32EnumAttrCase<"Acquire", 2, "acquire">,
+    I32EnumAttrCase<"Release", 3, "release">,
+    I32EnumAttrCase<"AcquireRelease", 4, "acq_rel">,
+    I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">
+]>;
+
 //===----------------------------------------------------------------------===//
 // AllocaOp
 //===----------------------------------------------------------------------===//
@@ -408,13 +422,14 @@ def CIR_LoadOp : CIR_Op<"load", [
   let arguments = (ins Arg<CIR_PointerType, "the address to load from",
                            [MemRead]>:$addr,
                        UnitAttr:$isDeref,
-                       OptionalAttr<I64Attr>:$alignment
-                       );
+                       OptionalAttr<I64Attr>:$alignment,
+                       OptionalAttr<CIR_MemOrder>:$mem_order);
   let results = (outs CIR_AnyType:$result);
 
   let assemblyFormat = [{
     (`deref` $isDeref^)?
     (`align` `(` $alignment^ `)`)?
+    (`atomic` `(` $mem_order^ `)`)?
     $addr `:` qualified(type($addr)) `,` type($result) attr-dict
   }];
 
@@ -451,10 +466,12 @@ def CIR_StoreOp : CIR_Op<"store", [
   let arguments = (ins CIR_AnyType:$value,
                        Arg<CIR_PointerType, "the address to store the value",
                            [MemWrite]>:$addr,
-                           OptionalAttr<I64Attr>:$alignment);
+                       OptionalAttr<I64Attr>:$alignment,
+                       OptionalAttr<CIR_MemOrder>:$mem_order);
 
   let assemblyFormat = [{
     (`align` `(` $alignment^ `)`)?
+    (`atomic` `(` $mem_order^ `)`)?
     $value `,` $addr attr-dict `:` type($value) `,` qualified(type($addr))
   }];
 
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
index fead5725d183d..17fddaee871b3 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
+++ b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
@@ -113,6 +113,18 @@ LLVM_ATTRIBUTE_UNUSED static bool isValidLinkage(GlobalLinkageKind gl) {
          isLinkOnceLinkage(gl);
 }
 
+bool operator<(cir::MemOrder, cir::MemOrder) = delete;
+bool operator>(cir::MemOrder, cir::MemOrder) = delete;
+bool operator<=(cir::MemOrder, cir::MemOrder) = delete;
+bool operator>=(cir::MemOrder, cir::MemOrder) = delete;
+
+// Validate that an integral value, which isn't known to fit within the enum's
+// range, is a valid AtomicOrderingCABI.
+template <typename Int> inline bool isValidCIRAtomicOrderingCABI(Int value) {
+  return static_cast<Int>(cir::MemOrder::Relaxed) <= value &&
+         value <= static_cast<Int>(cir::MemOrder::SequentiallyConsistent);
+}
+
 } // namespace cir
 
 #endif // CLANG_CIR_DIALECT_IR_CIROPSENUMS_H
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 805c43e6d5054..2526b1644e0ad 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -52,7 +52,6 @@ struct MissingFeatures {
   static bool opLoadEmitScalarRangeCheck() { return false; }
   static bool opLoadBooleanRepresentation() { return false; }
   static bool opLoadStoreTbaa() { return false; }
-  static bool opLoadStoreMemOrder() { return false; }
   static bool opLoadStoreVolatile() { return false; }
   static bool opLoadStoreAtomic() { return false; }
   static bool opLoadStoreObjC() { return false; }
@@ -167,6 +166,8 @@ struct MissingFeatures {
   static bool atomicInfoGetAtomicPointer() { return false; }
   static bool atomicInfoGetAtomicAddress() { return false; }
   static bool atomicUseLibCall() { return false; }
+  static bool atomicScope() { return false; }
+  static bool atomicSyncScopeID() { return false; }
 
   // Misc
   static bool abiArgInfo() { return false; }
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
index 6c927e9eda9cc..a851d06321cc1 100644
--- a/clang/lib/CIR/CodeGen/Address.h
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -68,6 +68,12 @@ class Address {
     return pointerAndKnownNonNull.getPointer() != nullptr;
   }
 
+  /// Return address with different pointer, but same element type and
+  /// alignment.
+  Address withPointer(mlir::Value newPtr) const {
+    return Address(newPtr, getElementType(), getAlignment());
+  }
+
   /// Return address with different element type, a bitcast pointer, and
   /// the same alignment.
   Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const;
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 979085f037d4f..7c79f93b88de3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -96,6 +96,15 @@ class AtomicInfo {
 
   bool emitMemSetZeroIfNecessary() const;
 
+  /// Cast the given pointer to an integer pointer suitable for atomic
+  /// operations on the source.
+  Address castToAtomicIntPointer(Address addr) const;
+
+  /// If addr is compatible with the iN that will be used for an atomic
+  /// operation, bitcast it. Otherwise, create a temporary that is suitable and
+  /// copy the value across.
+  Address convertToAtomicIntPointer(Address addr) const;
+
   /// Copy an atomic r-value into atomic-layout memory.
   void emitCopyIntoMemory(RValue rvalue) const;
 
@@ -111,11 +120,24 @@ class AtomicInfo {
     return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
   }
 
+  /// Creates temp alloca for intermediate operations on atomic value.
+  Address createTempAlloca() const;
+
 private:
   bool requiresMemSetZero(mlir::Type ty) const;
 };
 } // namespace
 
+// This function emits any expression (scalar, complex, or aggregate)
+// into a temporary alloca.
+static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
+  Address declPtr = cgf.createMemTemp(
+      e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
+  cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
+                       /*Init*/ true);
+  return declPtr;
+}
+
 /// Does a store of the given IR type modify the full expected width?
 static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                            uint64_t expectedSize) {
@@ -147,6 +169,41 @@ bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
   llvm_unreachable("bad evaluation kind");
 }
 
+Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
+  mlir::Type ty = addr.getElementType();
+  uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
+  if (sourceSizeInBits != atomicSizeInBits) {
+    cgf.cgm.errorNYI(
+        loc,
+        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
+  }
+
+  return castToAtomicIntPointer(addr);
+}
+
+Address AtomicInfo::createTempAlloca() const {
+  Address tempAlloca = cgf.createMemTemp(
+      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
+                                                                  : atomicTy,
+      getAtomicAlignment(), loc, "atomic-temp");
+
+  // Cast to pointer to value type for bitfields.
+  if (lvalue.isBitField()) {
+    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
+  }
+
+  return tempAlloca;
+}
+
+Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
+  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
+  // Don't bother with int casts if the integer size is the same.
+  if (intTy && intTy.getWidth() == atomicSizeInBits)
+    return addr;
+  auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
+  return addr.withElementType(cgf.getBuilder(), ty);
+}
+
 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
   assert(lvalue.isSimple());
   Address addr = lvalue.getAddress();
@@ -187,12 +244,85 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
   }
 }
 
+static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
+                         Address ptr, Address val1, uint64_t size,
+                         cir::MemOrder order) {
+  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
+  if (scopeModel) {
+    assert(!cir::MissingFeatures::atomicScope());
+    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
+    return;
+  }
+
+  assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+  CIRGenBuilderTy &builder = cgf.getBuilder();
+  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
+  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
+
+  switch (expr->getOp()) {
+  default:
+    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
+    break;
+
+  case AtomicExpr::AO__c11_atomic_init:
+    llvm_unreachable("already handled!");
+
+  case AtomicExpr::AO__c11_atomic_load:
+  case AtomicExpr::AO__atomic_load: {
+    cir::LoadOp load =
+        builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());
+
+    assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+    load->setAttr("mem_order", orderAttr);
+
+    // TODO(cir): this logic should be part of createStore, but doing so
+    // currently breaks CodeGen/union.cpp.
+    auto ptrTy = mlir::cast<cir::PointerType>(dest.getPointer().getType());
+    if (dest.getElementType() != ptrTy.getPointee()) {
+      dest = dest.withPointer(
+          builder.createPtrBitcast(dest.getPointer(), dest.getElementType()));
+    }
+    builder.createStore(loc, load->getResult(0), dest);
+    return;
+  }
+
+  case AtomicExpr::AO__c11_atomic_store:
+  case AtomicExpr::AO__atomic_store: {
+    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
+
+    assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
+                        /*align=*/mlir::IntegerAttr{}, orderAttr);
+    return;
+  }
+  }
+}
+
+static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
+  if (!cir::isValidCIRAtomicOrderingCABI(order))
+    return false;
+  auto memOrder = static_cast<cir::MemOrder>(order);
+  if (isStore)
+    return memOrder != cir::MemOrder::Consume &&
+           memOrder != cir::MemOrder::Acquire &&
+           memOrder != cir::MemOrder::AcquireRelease;
+  if (isLoad)
+    return memOrder != cir::MemOrder::Release &&
+           memOrder != cir::MemOrder::AcquireRelease;
+  return true;
+}
+
 RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   QualType atomicTy = e->getPtr()->getType()->getPointeeType();
   QualType memTy = atomicTy;
   if (const auto *ty = atomicTy->getAs<AtomicType>())
     memTy = ty->getValueType();
 
+  Address val1 = Address::invalid();
+  Address dest = Address::invalid();
   Address ptr = emitPointerWithAlignment(e->getPtr());
 
   assert(!cir::MissingFeatures::openCL());
@@ -202,9 +332,105 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     return RValue::get(nullptr);
   }
 
-  assert(!cir::MissingFeatures::atomicExpr());
-  cgm.errorNYI(e->getSourceRange(), "atomic expr is NYI");
-  return RValue::get(nullptr);
+  TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
+  uint64_t size = typeInfo.Width.getQuantity();
+
+  Expr::EvalResult orderConst;
+  mlir::Value order;
+  if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
+    order = emitScalarExpr(e->getOrder());
+
+  bool shouldCastToIntPtrTy = true;
+
+  switch (e->getOp()) {
+  default:
+    cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
+    return RValue::get(nullptr);
+
+  case AtomicExpr::AO__c11_atomic_init:
+    llvm_unreachable("already handled above with emitAtomicInit");
+
+  case AtomicExpr::AO__c11_atomic_load:
+    break;
+
+  case AtomicExpr::AO__atomic_load:
+    dest = emitPointerWithAlignment(e->getVal1());
+    break;
+
+  case AtomicExpr::AO__atomic_store:
+    val1 = emitPointerWithAlignment(e->getVal1());
+    break;
+
+  case AtomicExpr::AO__c11_atomic_store:
+    val1 = emitValToTemp(*this, e->getVal1());
+    break;
+  }
+
+  QualType resultTy = e->getType().getUnqualifiedType();
+
+  // The inlined atomics only function on iN types, where N is a power of 2. We
+  // need to make sure (via temporaries if necessary) that all incoming values
+  // are compatible.
+  LValue atomicValue = makeAddrLValue(ptr, atomicTy);
+  AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));
+
+  if (shouldCastToIntPtrTy) {
+    ptr = atomics.castToAtomicIntPointer(ptr);
+    if (val1.isValid())
+      val1 = atomics.convertToAtomicIntPointer(val1);
+  }
+  if (dest.isValid()) {
+    if (shouldCastToIntPtrTy)
+      dest = atomics.castToAtomicIntPointer(dest);
+  } else if (!resultTy->isVoidType()) {
+    dest = atomics.createTempAlloca();
+    if (shouldCastToIntPtrTy)
+      dest = atomics.castToAtomicIntPointer(dest);
+  }
+
+  bool powerOf2Size = (size & (size - 1)) == 0;
+  bool useLibCall = !powerOf2Size || (size > 16);
+
+  // For atomics larger than 16 bytes, emit a libcall from the frontend. This
+  // avoids the overhead of dealing with excessively-large value types in IR.
+  // Non-power-of-2 values also lower to libcall here, as they are not currently
+  // permitted in IR instructions (although that constraint could be relaxed in
+  // the future). For other cases where a libcall is required on a given
+  // platform, we let the backend handle it (this includes handling for all of
+  // the size-optimized libcall variants, which are only valid up to 16 bytes.)
+  //
+  // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
+  if (useLibCall) {
+    assert(!cir::MissingFeatures::atomicUseLibCall());
+    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
+    return RValue::get(nullptr);
+  }
+
+  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
+                 e->getOp() == AtomicExpr::AO__atomic_store;
+  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
+                e->getOp() == AtomicExpr::AO__atomic_load;
+
+  if (!order) {
+    // We have evaluated the memory order as an integer constant in orderConst.
+    // We should not ever get to a case where the ordering isn't a valid CABI
+    // value, but it's hard to enforce that in general.
+    uint64_t ord = orderConst.Val.getInt().getZExtValue();
+    if (isMemOrderValid(ord, isStore, isLoad))
+      emitAtomicOp(*this, e, dest, ptr, val1, size,
+                   static_cast<cir::MemOrder>(ord));
+  } else {
+    assert(!cir::MissingFeatures::atomicExpr());
+    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
+    return RValue::get(nullptr);
+  }
+
+  if (resultTy->isVoidType())
+    return RValue::get(nullptr);
+
+  return convertTempToRValue(
+      dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
+      e->getExprLoc());
 }
 
 void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 8b2538c941f47..655ffeb0d45c3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -348,15 +348,19 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
   cir::LoadOp createLoad(mlir::Location loc, Address addr,
                          bool isVolatile = false) {
     mlir::IntegerAttr align = getAlignmentAttr(addr.getAlignment());
-    return create<cir::LoadOp>(loc, addr.getPointer(), /*isDeref=*/false,
-                               align);
+    return cir::LoadOp::create(*this, loc, addr.getPointer(), /*isDeref=*/false,
+                               /*alignment=*/align,
+                               /*mem_order=*/cir::MemOrderAttr{});
   }
 
   cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst,
-                           mlir::IntegerAttr align = {}) {
+                           bool isVolatile = false,
+                           mlir::IntegerAttr align = {},
+                           cir::MemOrderAttr order = {}) {
     if (!align)
       align = getAlignmentAttr(dst.getAlignment());
-    return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), align);
+    return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), isVolatile,
+                                         align, order);
   }
 
   /// Create a cir.complex.real_ptr operation that derives a pointer to the real
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index ddc1edd77010c..2bcf8e24fc365 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -1369,7 +1369,7 @@ class CIRGenFunction : public CIRGenTypeCache {
       mlir::OpBuilder::InsertionGuard guard(builder);
       builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
       builder.createStore(
-          value.getLoc(), value, addr,
+          value.getLoc(), value, addr, /*isVolatile=*/false,
           mlir::IntegerAttr::get(
               mlir::IntegerType::get(value.getContext(), 64),
               (uint64_t)addr.getAlignment().getAsAlign().value()));
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index ad5f52034f92a..d99e2e89da537 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1097,12 +1097,33 @@ mlir::LogicalResult CIRToLLVMCallOpLowering::matchAndRewrite(
                              getTypeConverter(), op.getCalleeAttr());
 }
 
+static mlir::LLVM::AtomicOrdering
+getLLVMMemOrder(std::optional<cir::MemOrder> memorder) {
+  if (!memorder)
+    return mlir::LLVM::AtomicOrdering::not_atomic;
+  switch (*memorder) {
+  case cir::MemOrder::Relaxed:
+    return mlir::LLVM::AtomicOrdering::monotonic;
+  case cir::MemOrder::Consume:
+  case cir::MemOrder::Acquire:
+    return mlir::LLVM::AtomicOrdering::acquire;
+  case cir::MemOrder::Release:
+    return mlir::LLVM::AtomicOrdering::release;
+  case cir::MemOrder::AcquireRelease:
+    return mlir::LLVM::AtomicOrdering::acq_rel;
+  case cir::MemOrder::SequentiallyConsistent:
+    return mlir::LLVM::AtomicOrdering::seq_cst;
+  default:
+    llvm_unreachable("unknown memory order");
+  }
+}
+
 mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
     cir::LoadOp op,...
[truncated]

``````````
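
For context on the memory-order handling in the hunks above: `isMemOrderValid` enforces the C11 rules that a store may not use a consume-, acquire-, or acq_rel-flavored order and that a load may not use a release- or acq_rel-flavored order, while `getLLVMMemOrder` maps `consume` to LLVM's `acquire`. A small C sketch of orders accepted on the constant-order path (illustrative only; not part of the patch):

```c
_Atomic int x;

void valid_orders(void) {
  // Loads accept relaxed, consume, acquire, and seq_cst; consume is
  // lowered to LLVM 'acquire' by getLLVMMemOrder.
  int a = __c11_atomic_load(&x, __ATOMIC_CONSUME);

  // Stores accept relaxed, release, and seq_cst. For an invalid constant
  // order (e.g. __ATOMIC_ACQUIRE on a store), isMemOrderValid fails and
  // emitAtomicExpr emits no atomic operation.
  __c11_atomic_store(&x, a, __ATOMIC_RELEASE);
}
```

Per the updated `assemblyFormat`, a constant order prints on the op itself, roughly as `cir.load atomic(seq_cst)` or `cir.store atomic(release)` (the exact printed form depends on the surrounding operands).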

</details>


https://github.com/llvm/llvm-project/pull/153814