https://github.com/Lancern updated 
https://github.com/llvm/llvm-project/pull/156253

From af79b902dc043ee85d02015cce912375bdaf4cf3 Mon Sep 17 00:00:00 2001
From: Sirui Mu <msrlanc...@gmail.com>
Date: Mon, 1 Sep 2025 00:34:29 +0800
Subject: [PATCH] [CIR] Add support for atomic compare-and-swap

This patch adds support for atomic compare-and-swap operations, including the
following C/C++ intrinsics:

  - __atomic_compare_exchange
  - __atomic_compare_exchange_n
  - __c11_atomic_compare_exchange_strong
  - __c11_atomic_compare_exchange_weak
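
A minimal usage sketch of the builtins listed above (illustrative only; the
wrapper function and argument names are hypothetical and not part of this
patch):

  // Sketch: each call below is emitted as a cir.atomic.cmpxchg operation.
  void cas_sketch(_Atomic(int) *aptr, int *plain, int *expected, int desired) {
    // C11-style strong and weak compare-and-swap on an _Atomic object.
    __c11_atomic_compare_exchange_strong(aptr, expected, desired,
                                         __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
    __c11_atomic_compare_exchange_weak(aptr, expected, desired,
                                       __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
    // GNU-style variants; the trailing arguments are weak, success order and
    // failure order. __atomic_compare_exchange takes `desired` by pointer.
    __atomic_compare_exchange(plain, expected, &desired, /*weak=*/0,
                              __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
    __atomic_compare_exchange_n(plain, expected, desired, /*weak=*/1,
                                __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
  }
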
---
 clang/include/clang/CIR/Dialect/IR/CIROps.td  |  73 ++++++
 clang/lib/CIR/CodeGen/CIRGenAtomic.cpp        | 144 +++++++++++-
 clang/lib/CIR/Dialect/IR/CIRDialect.cpp       |  14 ++
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp |  66 ++++--
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h   |  10 +
 clang/test/CIR/CodeGen/atomic.c               | 211 ++++++++++++++++++
 6 files changed, 491 insertions(+), 27 deletions(-)

diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 4592078af966b..bbbd10bfd8ad6 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -4000,4 +4000,77 @@ def CIR_ThrowOp : CIR_Op<"throw"> {
   let hasVerifier = 1;
 }
 
+//===----------------------------------------------------------------------===//
+// Atomic operations
+//===----------------------------------------------------------------------===//
+
+def CIR_AtomicCmpXchg : CIR_Op<"atomic.cmpxchg", [
+  AllTypesMatch<["old", "expected", "desired"]>
+]> {
+  let summary = "Atomic compare and exchange";
+  let description = [{
+    C/C++ atomic compare and exchange operation. Implements builtins like
+    `__atomic_compare_exchange_n` and `__atomic_compare_exchange`.
+
+    This operation takes three arguments: a pointer `ptr` and two values
+    `expected` and `desired`. It compares the value of the object pointed to
+    by `ptr` with `expected`, and if the two are equal, it stores `desired`
+    into the object.
+
+    The `succ_order` attribute gives the memory order of this atomic operation
+    when the exchange takes place. The `fail_order` attribute gives the memory
+    order of this atomic operation when the exchange does not take place.
+
+    The `weak` attribute is a boolean flag that indicates whether this is a
+    "weak" compare-and-exchange operation. A weak compare-and-exchange
+    operation may fail spuriously: it may behave as if the comparison failed
+    and leave the value unchanged even if `*ptr` and `expected` actually
+    compare equal.
+    
+    The type of `expected` and `desired` must be the same. The pointee type of
+    `ptr` must be the same as the type of `expected` and `desired`.
+
+    This operation has two results. The first result `old` gives the old value
+    of the object pointed-to by `ptr`, regardless of whether the exchange
+    actually took place. The second result `success` is a boolean flag
+    indicating whether the exchange actually took place.
+
+    Example:
+
+    ```mlir
+    %old, %success = cir.atomic.cmpxchg(%ptr : !cir.ptr<!u64i>,
+                                        %expected : !u64i,
+                                        %desired : !u64i,
+                                        success = seq_cst,
+                                        failure = seq_cst) weak
+                                        : (!u64i, !cir.bool)
+    ```
+  }];
+  let results = (outs CIR_AnyType:$old, CIR_BoolType:$success);
+  let arguments = (ins Arg<CIR_PointerType, "", [MemRead, MemWrite]>:$ptr,
+                       CIR_AnyType:$expected,
+                       CIR_AnyType:$desired,
+                       Arg<CIR_MemOrder, "success memory order">:$succ_order,
+                       Arg<CIR_MemOrder, "failure memory order">:$fail_order,
+                       OptionalAttr<I64Attr>:$alignment,
+                       UnitAttr:$weak,
+                       UnitAttr:$is_volatile);
+
+  let assemblyFormat = [{
+    `(`
+      $ptr `:` qualified(type($ptr)) `,`
+      $expected `:` type($expected) `,`
+      $desired `:` type($desired) `,`
+      `success` `=`  $succ_order `,`
+      `failure` `=`  $fail_order
+    `)`
+    (`align` `(` $alignment^ `)`)?
+    (`weak` $weak^)?
+    (`volatile` $is_volatile^)?
+    `:` `(` type($old) `,` type($success) `)` attr-dict
+  }];
+
+  let hasVerifier = 1;
+}
+
 #endif // CLANG_CIR_DIALECT_IR_CIROPS_TD
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 3b78e6e22d2a7..86ba0299af3cf 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -244,8 +244,94 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
   }
 }
 
+static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
+                              Address dest, Address ptr, Address val1,
+                              Address val2, uint64_t size,
+                              cir::MemOrder successOrder,
+                              cir::MemOrder failureOrder) {
+  mlir::Location loc = cgf.getLoc(e->getSourceRange());
+
+  CIRGenBuilderTy &builder = cgf.getBuilder();
+  mlir::Value expected = builder.createLoad(loc, val1);
+  mlir::Value desired = builder.createLoad(loc, val2);
+
+  auto cmpxchg = cir::AtomicCmpXchg::create(
+      builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
+      expected, desired,
+      cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
+      cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
+      builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));
+
+  cmpxchg.setIsVolatile(e->isVolatile());
+  cmpxchg.setWeak(isWeak);
+
+  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
+  cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
+                    [&](mlir::OpBuilder &, mlir::Location) {
+                      auto ptrTy = mlir::cast<cir::PointerType>(
+                          val1.getPointer().getType());
+                      if (val1.getElementType() != ptrTy.getPointee()) {
+                        val1 = val1.withPointer(builder.createPtrBitcast(
+                            val1.getPointer(), val1.getElementType()));
+                      }
+                      builder.createStore(loc, cmpxchg.getOld(), val1);
+                      builder.createYield(loc);
+                    });
+
+  // Update the memory at Dest with Success's value.
+  cgf.emitStoreOfScalar(cmpxchg.getSuccess(),
+                        cgf.makeAddrLValue(dest, e->getType()),
+                        /*isInit=*/false);
+}
+
+static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
+                                        bool isWeak, Address dest, Address ptr,
+                                        Address val1, Address val2,
+                                        Expr *failureOrderExpr, uint64_t size,
+                                        cir::MemOrder successOrder) {
+  Expr::EvalResult failureOrderEval;
+  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
+    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();
+
+    cir::MemOrder failureOrder;
+    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
+      failureOrder = cir::MemOrder::Relaxed;
+    } else {
+      switch ((cir::MemOrder)failureOrderInt) {
+      case cir::MemOrder::Relaxed:
+        // 31.7.2.18: "The failure argument shall not be memory_order_release
+        // nor memory_order_acq_rel". Fallback to monotonic.
+      case cir::MemOrder::Release:
+      case cir::MemOrder::AcquireRelease:
+        failureOrder = cir::MemOrder::Relaxed;
+        break;
+      case cir::MemOrder::Consume:
+      case cir::MemOrder::Acquire:
+        failureOrder = cir::MemOrder::Acquire;
+        break;
+      case cir::MemOrder::SequentiallyConsistent:
+        failureOrder = cir::MemOrder::SequentiallyConsistent;
+        break;
+      }
+    }
+
+    // Prior to c++17, "the failure argument shall be no stronger than the
+    // success argument". This condition has been lifted and the only
+    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
+    // language version checks.
+    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size, successOrder,
+                      failureOrder);
+    return;
+  }
+
+  assert(!cir::MissingFeatures::atomicExpr());
+  cgf.cgm.errorNYI(e->getSourceRange(),
+                   "emitAtomicCmpXchgFailureSet: non-constant failure order");
+}
+
 static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
-                         Address ptr, Address val1, uint64_t size,
+                         Address ptr, Address val1, Address val2,
+                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                          cir::MemOrder order) {
   std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
   if (scopeModel) {
@@ -264,6 +350,30 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   case AtomicExpr::AO__c11_atomic_init:
     llvm_unreachable("already handled!");
 
+  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
+                                val2, failureOrderExpr, size, order);
+    return;
+
+  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
+                                val2, failureOrderExpr, size, order);
+    return;
+
+  case AtomicExpr::AO__atomic_compare_exchange:
+  case AtomicExpr::AO__atomic_compare_exchange_n: {
+    bool isWeak = false;
+    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
+      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
+                                  failureOrderExpr, size, order);
+    } else {
+      assert(!cir::MissingFeatures::atomicExpr());
+      cgf.cgm.errorNYI(expr->getSourceRange(),
+                       "emitAtomicOp: non-constant isWeak");
+    }
+    return;
+  }
+
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_load_n:
   case AtomicExpr::AO__atomic_load: {
@@ -292,16 +402,12 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__opencl_atomic_init:
 
-  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
 
-  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
   case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
 
-  case AtomicExpr::AO__atomic_compare_exchange:
-  case AtomicExpr::AO__atomic_compare_exchange_n:
   case AtomicExpr::AO__scoped_atomic_compare_exchange:
   case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
 
@@ -421,7 +527,11 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   if (const auto *ty = atomicTy->getAs<AtomicType>())
     memTy = ty->getValueType();
 
+  Expr *isWeakExpr = nullptr;
+  Expr *orderFailExpr = nullptr;
+
   Address val1 = Address::invalid();
+  Address val2 = Address::invalid();
   Address dest = Address::invalid();
   Address ptr = emitPointerWithAlignment(e->getPtr());
 
@@ -462,6 +572,24 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     val1 = emitPointerWithAlignment(e->getVal1());
     break;
 
+  case AtomicExpr::AO__atomic_compare_exchange:
+  case AtomicExpr::AO__atomic_compare_exchange_n:
+  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+    val1 = emitPointerWithAlignment(e->getVal1());
+    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
+      val2 = emitPointerWithAlignment(e->getVal2());
+    else
+      val2 = emitValToTemp(*this, e->getVal2());
+    orderFailExpr = e->getOrderFail();
+    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
+        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
+        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
+      isWeakExpr = e->getWeak();
+    break;
+
   case AtomicExpr::AO__atomic_store_n:
   case AtomicExpr::AO__c11_atomic_store:
     val1 = emitValToTemp(*this, e->getVal1());
@@ -484,6 +612,8 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   if (dest.isValid()) {
     if (shouldCastToIntPtrTy)
       dest = atomics.castToAtomicIntPointer(dest);
+  } else if (e->isCmpXChg()) {
+    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
   } else if (!resultTy->isVoidType()) {
     dest = atomics.createTempAlloca();
     if (shouldCastToIntPtrTy)
@@ -530,8 +660,8 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     // value, but it's hard to enforce that in general.
     uint64_t ord = orderConst.Val.getInt().getZExtValue();
     if (isMemOrderValid(ord, isStore, isLoad))
-      emitAtomicOp(*this, e, dest, ptr, val1, size,
-                   static_cast<cir::MemOrder>(ord));
+      emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
+                   size, static_cast<cir::MemOrder>(ord));
   } else {
     assert(!cir::MissingFeatures::atomicExpr());
     cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index bccc0da588a0d..24aef693024f7 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -2730,6 +2730,20 @@ mlir::LogicalResult cir::ThrowOp::verify() {
   return failure();
 }
 
+//===----------------------------------------------------------------------===//
+// AtomicCmpXchg
+//===----------------------------------------------------------------------===//
+
+LogicalResult cir::AtomicCmpXchg::verify() {
+  mlir::Type pointeeType = getPtr().getType().getPointee();
+
+  if (pointeeType != getExpected().getType() ||
+      pointeeType != getDesired().getType())
+    return emitOpError("ptr, expected and desired types must match");
+
+  return success();
+}
+
 //===----------------------------------------------------------------------===//
 // TableGen'd op method definitions
 //===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index ee9f58c829ca9..8840d6b8127f0 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -640,6 +640,51 @@ mlir::LogicalResult CIRToLLVMAssumeSepStorageOpLowering::matchAndRewrite(
   return mlir::success();
 }
 
+static mlir::LLVM::AtomicOrdering
+getLLVMMemOrder(std::optional<cir::MemOrder> memorder) {
+  if (!memorder)
+    return mlir::LLVM::AtomicOrdering::not_atomic;
+  switch (*memorder) {
+  case cir::MemOrder::Relaxed:
+    return mlir::LLVM::AtomicOrdering::monotonic;
+  case cir::MemOrder::Consume:
+  case cir::MemOrder::Acquire:
+    return mlir::LLVM::AtomicOrdering::acquire;
+  case cir::MemOrder::Release:
+    return mlir::LLVM::AtomicOrdering::release;
+  case cir::MemOrder::AcquireRelease:
+    return mlir::LLVM::AtomicOrdering::acq_rel;
+  case cir::MemOrder::SequentiallyConsistent:
+    return mlir::LLVM::AtomicOrdering::seq_cst;
+  }
+  llvm_unreachable("unknown memory order");
+}
+
+mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite(
+    cir::AtomicCmpXchg op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  mlir::Value expected = adaptor.getExpected();
+  mlir::Value desired = adaptor.getDesired();
+
+  auto cmpxchg = mlir::LLVM::AtomicCmpXchgOp::create(
+      rewriter, op.getLoc(), adaptor.getPtr(), expected, desired,
+      getLLVMMemOrder(adaptor.getSuccOrder()),
+      getLLVMMemOrder(adaptor.getFailOrder()));
+  assert(!cir::MissingFeatures::atomicScope());
+  cmpxchg.setAlignment(adaptor.getAlignment());
+  cmpxchg.setWeak(adaptor.getWeak());
+  cmpxchg.setVolatile_(adaptor.getIsVolatile());
+
+  // Check result and apply stores accordingly.
+  auto old = mlir::LLVM::ExtractValueOp::create(rewriter, op.getLoc(),
+                                                cmpxchg.getResult(), 0);
+  auto cmp = mlir::LLVM::ExtractValueOp::create(rewriter, op.getLoc(),
+                                                cmpxchg.getResult(), 1);
+
+  rewriter.replaceOp(op, {old, cmp});
+  return mlir::success();
+}
+
 mlir::LogicalResult CIRToLLVMBitClrsbOpLowering::matchAndRewrite(
     cir::BitClrsbOp op, OpAdaptor adaptor,
     mlir::ConversionPatternRewriter &rewriter) const {
@@ -1202,26 +1247,6 @@ mlir::LogicalResult CIRToLLVMFrameAddrOpLowering::matchAndRewrite(
   return mlir::success();
 }
 
-static mlir::LLVM::AtomicOrdering
-getLLVMMemOrder(std::optional<cir::MemOrder> memorder) {
-  if (!memorder)
-    return mlir::LLVM::AtomicOrdering::not_atomic;
-  switch (*memorder) {
-  case cir::MemOrder::Relaxed:
-    return mlir::LLVM::AtomicOrdering::monotonic;
-  case cir::MemOrder::Consume:
-  case cir::MemOrder::Acquire:
-    return mlir::LLVM::AtomicOrdering::acquire;
-  case cir::MemOrder::Release:
-    return mlir::LLVM::AtomicOrdering::release;
-  case cir::MemOrder::AcquireRelease:
-    return mlir::LLVM::AtomicOrdering::acq_rel;
-  case cir::MemOrder::SequentiallyConsistent:
-    return mlir::LLVM::AtomicOrdering::seq_cst;
-  }
-  llvm_unreachable("unknown memory order");
-}
-
 mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
     cir::LoadOp op, OpAdaptor adaptor,
     mlir::ConversionPatternRewriter &rewriter) const {
@@ -2430,6 +2455,7 @@ void ConvertCIRToLLVMPass::runOnOperation() {
                CIRToLLVMAssumeOpLowering,
                CIRToLLVMAssumeAlignedOpLowering,
                CIRToLLVMAssumeSepStorageOpLowering,
+               CIRToLLVMAtomicCmpXchgLowering,
                CIRToLLVMBaseClassAddrOpLowering,
                CIRToLLVMBinOpLowering,
                CIRToLLVMBitClrsbOpLowering,
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index 2c2aede09b0b2..cf98baf690900 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -134,6 +134,16 @@ class CIRToLLVMBitReverseOpLowering
                   mlir::ConversionPatternRewriter &) const override;
 };
 
+class CIRToLLVMAtomicCmpXchgLowering
+    : public mlir::OpConversionPattern<cir::AtomicCmpXchg> {
+public:
+  using mlir::OpConversionPattern<cir::AtomicCmpXchg>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(cir::AtomicCmpXchg op, OpAdaptor,
+                  mlir::ConversionPatternRewriter &) const override;
+};
+
 class CIRToLLVMBrCondOpLowering
     : public mlir::OpConversionPattern<cir::BrCondOp> {
 public:
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 8b947f795d1d4..0eba2959c0ebc 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -204,3 +204,214 @@ void c11_store(_Atomic(int) *ptr, int x) {
 // OGCG:   store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
 // OGCG: }
 
+void c11_atomic_cmpxchg_strong(_Atomic(int) *ptr, int *expected, int desired) {
+  // CIR-LABEL: @c11_atomic_cmpxchg_strong
+  // LLVM-LABEL: @c11_atomic_cmpxchg_strong
+  // OGCG-LABEL: @c11_atomic_cmpxchg_strong
+
+  __c11_atomic_compare_exchange_strong(ptr, expected, desired,
+                                       __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg(%{{.+}} : !cir.ptr<!s32i>, %{{.+}} : !s32i, %{{.+}} : !s32i, success = seq_cst, failure = acquire) align(4) : (!s32i, !cir.bool)
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    cir.if %[[FAILED]] {
+  // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:    }
+  // CIR-NEXT:    cir.store align(1) %[[SUCCESS]], %{{.+}} : !cir.bool, !cir.ptr<!cir.bool>
+
+  // LLVM:         %[[RESULT:.+]] = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // LLVM-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // LLVM-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // LLVM-NEXT:    %[[FAILED:.+]] = xor i1 %[[SUCCESS]], true
+  // LLVM-NEXT:    br i1 %[[FAILED]], label %[[LABEL_FAILED:.+]], label %[[LABEL_CONT:.+]]
+  // LLVM:       [[LABEL_FAILED]]:
+  // LLVM-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // LLVM-NEXT:    br label %[[LABEL_CONT]]
+  // LLVM:       [[LABEL_CONT]]:
+  // LLVM-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // LLVM-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+  // OGCG:         %[[RESULT:.+]] = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // OGCG-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // OGCG-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // OGCG-NEXT:    br i1 %[[SUCCESS]], label %[[LABEL_CONT:.+]], label %[[LABEL_FAILED:.+]]
+  // OGCG:       [[LABEL_FAILED]]:
+  // OGCG-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // OGCG-NEXT:    br label %[[LABEL_CONT]]
+  // OGCG:       [[LABEL_CONT]]:
+  // OGCG-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // OGCG-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+}
+
+void c11_atomic_cmpxchg_weak(_Atomic(int) *ptr, int *expected, int desired) {
+  // CIR-LABEL: @c11_atomic_cmpxchg_weak
+  // LLVM-LABEL: @c11_atomic_cmpxchg_weak
+  // OGCG-LABEL: @c11_atomic_cmpxchg_weak
+
+  __c11_atomic_compare_exchange_weak(ptr, expected, desired,
+                                     __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg(%{{.+}} : !cir.ptr<!s32i>, %{{.+}} : !s32i, %{{.+}} : !s32i, success = seq_cst, failure = acquire) align(4) weak : (!s32i, !cir.bool)
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    cir.if %[[FAILED]] {
+  // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:    }
+  // CIR-NEXT:    cir.store align(1) %[[SUCCESS]], %{{.+}} : !cir.bool, !cir.ptr<!cir.bool>
+
+  // LLVM:         %[[RESULT:.+]] = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // LLVM-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // LLVM-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // LLVM-NEXT:    %[[FAILED:.+]] = xor i1 %[[SUCCESS]], true
+  // LLVM-NEXT:    br i1 %[[FAILED]], label %[[LABEL_FAILED:.+]], label %[[LABEL_CONT:.+]]
+  // LLVM:       [[LABEL_FAILED]]:
+  // LLVM-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // LLVM-NEXT:    br label %[[LABEL_CONT]]
+  // LLVM:       [[LABEL_CONT]]:
+  // LLVM-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // LLVM-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+  // OGCG:         %[[RESULT:.+]] = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // OGCG-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // OGCG-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // OGCG-NEXT:    br i1 %[[SUCCESS]], label %[[LABEL_CONT:.+]], label %[[LABEL_FAILED:.+]]
+  // OGCG:       [[LABEL_FAILED]]:
+  // OGCG-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // OGCG-NEXT:    br label %[[LABEL_CONT]]
+  // OGCG:       [[LABEL_CONT]]:
+  // OGCG-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // OGCG-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+}
+
+void atomic_cmpxchg(int *ptr, int *expected, int *desired) {
+  // CIR-LABEL: @atomic_cmpxchg
+  // LLVM-LABEL: @atomic_cmpxchg
+  // OGCG-LABEL: @atomic_cmpxchg
+
+  __atomic_compare_exchange(ptr, expected, desired, /*weak=*/0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg(%{{.+}} : !cir.ptr<!s32i>, %{{.+}} : !s32i, %{{.+}} : !s32i, success = seq_cst, failure = acquire) align(4) : (!s32i, !cir.bool)
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    cir.if %[[FAILED]] {
+  // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:    }
+  // CIR-NEXT:    cir.store align(1) %[[SUCCESS]], %{{.+}} : !cir.bool, !cir.ptr<!cir.bool>
+
+  // LLVM:         %[[RESULT:.+]] = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // LLVM-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // LLVM-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // LLVM-NEXT:    %[[FAILED:.+]] = xor i1 %[[SUCCESS]], true
+  // LLVM-NEXT:    br i1 %[[FAILED]], label %[[LABEL_FAILED:.+]], label %[[LABEL_CONT:.+]]
+  // LLVM:       [[LABEL_FAILED]]:
+  // LLVM-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // LLVM-NEXT:    br label %[[LABEL_CONT]]
+  // LLVM:       [[LABEL_CONT]]:
+  // LLVM-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // LLVM-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+  // OGCG:         %[[RESULT:.+]] = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // OGCG-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // OGCG-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // OGCG-NEXT:    br i1 %[[SUCCESS]], label %[[LABEL_CONT:.+]], label %[[LABEL_FAILED:.+]]
+  // OGCG:       [[LABEL_FAILED]]:
+  // OGCG-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // OGCG-NEXT:    br label %[[LABEL_CONT]]
+  // OGCG:       [[LABEL_CONT]]:
+  // OGCG-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // OGCG-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+  __atomic_compare_exchange(ptr, expected, desired, /*weak=*/1, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg(%{{.+}} : !cir.ptr<!s32i>, %{{.+}} : !s32i, %{{.+}} : !s32i, success = seq_cst, failure = acquire) align(4) weak : (!s32i, !cir.bool)
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    cir.if %[[FAILED]] {
+  // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:    }
+  // CIR-NEXT:    cir.store align(1) %[[SUCCESS]], %{{.+}} : !cir.bool, !cir.ptr<!cir.bool>
+
+  // LLVM:         %[[RESULT:.+]] = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // LLVM-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // LLVM-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // LLVM-NEXT:    %[[FAILED:.+]] = xor i1 %[[SUCCESS]], true
+  // LLVM-NEXT:    br i1 %[[FAILED]], label %[[LABEL_FAILED:.+]], label %[[LABEL_CONT:.+]]
+  // LLVM:       [[LABEL_FAILED]]:
+  // LLVM-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // LLVM-NEXT:    br label %[[LABEL_CONT]]
+  // LLVM:       [[LABEL_CONT]]:
+  // LLVM-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // LLVM-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+  // OGCG:         %[[RESULT:.+]] = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // OGCG-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // OGCG-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // OGCG-NEXT:    br i1 %[[SUCCESS]], label %[[LABEL_CONT:.+]], label %[[LABEL_FAILED:.+]]
+  // OGCG:       [[LABEL_FAILED]]:
+  // OGCG-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // OGCG-NEXT:    br label %[[LABEL_CONT]]
+  // OGCG:       [[LABEL_CONT]]:
+  // OGCG-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // OGCG-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+}
+
+void atomic_cmpxchg_n(int *ptr, int *expected, int desired) {
+  // CIR-LABEL: @atomic_cmpxchg_n
+  // LLVM-LABEL: @atomic_cmpxchg_n
+  // OGCG-LABEL: @atomic_cmpxchg_n
+
+  __atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg(%{{.+}} : !cir.ptr<!s32i>, %{{.+}} : !s32i, %{{.+}} : !s32i, success = seq_cst, failure = acquire) align(4) : (!s32i, !cir.bool)
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    cir.if %[[FAILED]] {
+  // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:    }
+  // CIR-NEXT:    cir.store align(1) %[[SUCCESS]], %{{.+}} : !cir.bool, !cir.ptr<!cir.bool>
+
+  // LLVM:         %[[RESULT:.+]] = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // LLVM-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // LLVM-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // LLVM-NEXT:    %[[FAILED:.+]] = xor i1 %[[SUCCESS]], true
+  // LLVM-NEXT:    br i1 %[[FAILED]], label %[[LABEL_FAILED:.+]], label %[[LABEL_CONT:.+]]
+  // LLVM:       [[LABEL_FAILED]]:
+  // LLVM-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // LLVM-NEXT:    br label %[[LABEL_CONT]]
+  // LLVM:       [[LABEL_CONT]]:
+  // LLVM-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // LLVM-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+  // OGCG:         %[[RESULT:.+]] = cmpxchg ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // OGCG-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // OGCG-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // OGCG-NEXT:    br i1 %[[SUCCESS]], label %[[LABEL_CONT:.+]], label %[[LABEL_FAILED:.+]]
+  // OGCG:       [[LABEL_FAILED]]:
+  // OGCG-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // OGCG-NEXT:    br label %[[LABEL_CONT]]
+  // OGCG:       [[LABEL_CONT]]:
+  // OGCG-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // OGCG-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+  __atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/1, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
+  // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg(%{{.+}} : !cir.ptr<!s32i>, %{{.+}} : !s32i, %{{.+}} : !s32i, success = seq_cst, failure = acquire) align(4) weak : (!s32i, !cir.bool)
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    cir.if %[[FAILED]] {
+  // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:    }
+  // CIR-NEXT:    cir.store align(1) %[[SUCCESS]], %{{.+}} : !cir.bool, !cir.ptr<!cir.bool>
+
+  // LLVM:         %[[RESULT:.+]] = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // LLVM-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // LLVM-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // LLVM-NEXT:    %[[FAILED:.+]] = xor i1 %[[SUCCESS]], true
+  // LLVM-NEXT:    br i1 %[[FAILED]], label %[[LABEL_FAILED:.+]], label %[[LABEL_CONT:.+]]
+  // LLVM:       [[LABEL_FAILED]]:
+  // LLVM-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // LLVM-NEXT:    br label %[[LABEL_CONT]]
+  // LLVM:       [[LABEL_CONT]]:
+  // LLVM-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // LLVM-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+
+  // OGCG:         %[[RESULT:.+]] = cmpxchg weak ptr %{{.+}}, i32 %{{.+}}, i32 %{{.+}} seq_cst acquire, align 4
+  // OGCG-NEXT:    %[[OLD:.+]] = extractvalue { i32, i1 } %[[RESULT]], 0
+  // OGCG-NEXT:    %[[SUCCESS:.+]] = extractvalue { i32, i1 } %[[RESULT]], 1
+  // OGCG-NEXT:    br i1 %[[SUCCESS]], label %[[LABEL_CONT:.+]], label %[[LABEL_FAILED:.+]]
+  // OGCG:       [[LABEL_FAILED]]:
+  // OGCG-NEXT:    store i32 %[[OLD]], ptr %{{.+}}, align 4
+  // OGCG-NEXT:    br label %[[LABEL_CONT]]
+  // OGCG:       [[LABEL_CONT]]:
+  // OGCG-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
+  // OGCG-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
+}
