https://github.com/Lancern created https://github.com/llvm/llvm-project/pull/168892

This patch upstreams CIR support for atomic operations with memory orders that 
are not known at compile time.
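
For reference, this enables C code like the following, where the order argument is an ordinary runtime value rather than a constant (a minimal illustration modeled on the new tests; the function name is made up):

  // The memory order is a function parameter, so CIRGen cannot pick a
  // single ordering at compile time. The patch emits a switch on the
  // runtime value that dispatches to one hard-coded ordering per case.
  int load_with_runtime_order(int *ptr, int order) {
    return __atomic_load_n(ptr, order);
  }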

From f7aa316149bdf8c660cdd9b652651fcc2b063862 Mon Sep 17 00:00:00 2001
From: Sirui Mu <[email protected]>
Date: Thu, 20 Nov 2025 23:37:37 +0800
Subject: [PATCH] [CIR] Add support for non-compile-time memory order

This patch upstreams CIR support for atomic operations with memory orders that
are not known at compile time.
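
Conceptually, the emitted switch performs the following source-level dispatch for a load (a sketch of the lowering strategy, not the literal generated code):

  switch (order) {
  case __ATOMIC_CONSUME:  // consume is treated as acquire
  case __ATOMIC_ACQUIRE:
    result = __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
    break;
  case __ATOMIC_SEQ_CST:
    result = __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
    break;
  default:  // relaxed, and any unsupported order value
    result = __atomic_load_n(ptr, __ATOMIC_RELAXED);
    break;
  }

Stores additionally get a release case, and read-write operations get release and acq_rel cases; the default case always falls back to relaxed because there is no good way to report an invalid order at runtime.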
---
 clang/lib/CIR/CodeGen/CIRGenAtomic.cpp |  85 +++++++-
 clang/test/CIR/CodeGen/atomic.c        | 266 +++++++++++++++++++++++++
 2 files changed, 348 insertions(+), 3 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index cd4c1f0e5b769..b4a297e99a814 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -286,6 +286,31 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
   }
 }
 
+static void emitMemOrderDefaultCaseLabel(CIRGenBuilderTy &builder,
+                                         mlir::Location loc) {
+  mlir::ArrayAttr ordersAttr = builder.getArrayAttr({});
+  mlir::OpBuilder::InsertPoint insertPoint;
+  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Default,
+                      insertPoint);
+  builder.restoreInsertionPoint(insertPoint);
+}
+
+// Create a "case" operation with the given list of orders as its values. Also
+// create the region that will hold the body of the switch-case label.
+static void emitMemOrderCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc,
+                                  mlir::Type orderType,
+                                  llvm::ArrayRef<cir::MemOrder> orders) {
+  llvm::SmallVector<mlir::Attribute, 2> orderAttrs;
+  for (cir::MemOrder order : orders)
+    orderAttrs.push_back(cir::IntAttr::get(orderType, static_cast<int>(order)));
+  mlir::ArrayAttr ordersAttr = builder.getArrayAttr(orderAttrs);
+
+  mlir::OpBuilder::InsertPoint insertPoint;
+  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Anyof,
+                      insertPoint);
+  builder.restoreInsertionPoint(insertPoint);
+}
+
 static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                               Address dest, Address ptr, Address val1,
                               Address val2, uint64_t size,
@@ -844,9 +869,63 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
       emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                    size, static_cast<cir::MemOrder>(ord));
   } else {
-    assert(!cir::MissingFeatures::atomicExpr());
-    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
-    return RValue::get(nullptr);
+    // The memory order is not known at compile time.  The atomic operations
+    // can't handle runtime memory orders; the memory order must be hard-coded.
+    // Generate a "switch" statement that converts a runtime value into a
+    // compile-time value.
+    cir::SwitchOp::create(
+        builder, order.getLoc(), order,
+        [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
+          mlir::Block *switchBlock = builder.getBlock();
+
+          auto emitMemOrderCase = [&](llvm::ArrayRef<cir::MemOrder> caseOrders,
+                                      cir::MemOrder actualOrder) {
+            if (caseOrders.empty())
+              emitMemOrderDefaultCaseLabel(builder, loc);
+            else
+              emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
+            emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr,
+                         orderFailExpr, size, actualOrder);
+            builder.createBreak(loc);
+            builder.setInsertionPointToEnd(switchBlock);
+          };
+
+          // default:
+          // Use memory_order_relaxed for relaxed operations and for any memory
+          // order value that is not supported.  There is no good way to report
+          // an unsupported memory order at runtime, hence the fallback to
+          // memory_order_relaxed.
+          emitMemOrderCase(/*caseOrders=*/{}, cir::MemOrder::Relaxed);
+
+          if (!isStore) {
+            // case consume:
+            // case acquire:
+            // memory_order_consume is not implemented; it is always treated
+            // like memory_order_acquire.  These memory orders are not valid for
+            // write-only operations.
+            emitMemOrderCase({cir::MemOrder::Consume, cir::MemOrder::Acquire},
+                             cir::MemOrder::Acquire);
+          }
+
+          if (!isLoad) {
+            // case release:
+            // memory_order_release is not valid for read-only operations.
+            emitMemOrderCase({cir::MemOrder::Release}, cir::MemOrder::Release);
+          }
+
+          if (!isLoad && !isStore) {
+            // case acq_rel:
+            // memory_order_acq_rel is only valid for read-write operations.
+            emitMemOrderCase({cir::MemOrder::AcquireRelease},
+                             cir::MemOrder::AcquireRelease);
+          }
+
+          // case seq_cst:
+          emitMemOrderCase({cir::MemOrder::SequentiallyConsistent},
+                           cir::MemOrder::SequentiallyConsistent);
+
+          builder.createYield(loc);
+        });
   }
 
   if (resultTy->isVoidType())
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index d5bea8446d730..71cb1f1e164b3 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -1133,3 +1133,269 @@ int c11_atomic_fetch_nand(_Atomic(int) *ptr, int value) {
   // OGCG:      %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
   // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
 }
+
+int atomic_load_dynamic_order(int *ptr, int order) {
+  // CIR-LABEL: atomic_load_dynamic_order
+  // LLVM-LABEL: atomic_load_dynamic_order
+  // OGCG-LABEL: atomic_load_dynamic_order
+
+  return __atomic_load_n(ptr, order);
+  
+  // CIR:      %[[PTR:.+]] = cir.load align(8) %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+  // CIR-NEXT: %[[ORDER:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT: cir.switch (%[[ORDER]] : !s32i) {
+  // CIR-NEXT:   cir.case(default, []) {
+  // CIR-NEXT:     %[[RES:.+]] = cir.load align(4) atomic(relaxed) %[[PTR]] : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     cir.store align(4) %[[RES]], %[[RES_SLOT:.+]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:     cir.break
+  // CIR-NEXT:   }
+  // CIR-NEXT:   cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+  // CIR-NEXT:     %[[RES:.+]] = cir.load align(4) atomic(acquire) %[[PTR]] : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:     cir.break
+  // CIR-NEXT:   }
+  // CIR-NEXT:   cir.case(anyof, [#cir.int<5> : !s32i]) {
+  // CIR-NEXT:     %[[RES:.+]] = cir.load align(4) atomic(seq_cst) %[[PTR]] : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:     cir.break
+  // CIR-NEXT:   }
+  // CIR-NEXT:   cir.yield
+  // CIR-NEXT: }
+  // CIR-NEXT: %{{.+}} = cir.load align(4) %[[RES_SLOT]] : !cir.ptr<!s32i>, !s32i
+
+  // LLVM:        %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
+  // LLVM-NEXT:   %[[ORDER:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM-NEXT:   br label %[[SWITCH_BLK:.+]]
+  // LLVM:      [[SWITCH_BLK]]:
+  // LLVM-NEXT:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // LLVM-NEXT:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // LLVM-NEXT:     i32 2, label %[[ACQUIRE_BLK]]
+  // LLVM-NEXT:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // LLVM-NEXT:   ]
+  // LLVM:      [[DEFAULT_BLK]]:
+  // LLVM-NEXT:   %[[RES:.+]] = load atomic i32, ptr %[[PTR]] monotonic, align 4
+  // LLVM-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT:.+]], align 4
+  // LLVM-NEXT:   br label %[[CONTINUE_BLK:.+]]
+  // LLVM:      [[ACQUIRE_BLK]]:
+  // LLVM-NEXT:   %[[RES:.+]] = load atomic i32, ptr %[[PTR]] acquire, align 4
+  // LLVM-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // LLVM-NEXT:   br label %[[CONTINUE_BLK]]
+  // LLVM:      [[SEQ_CST_BLK]]:
+  // LLVM-NEXT:   %[[RES:.+]] = load atomic i32, ptr %[[PTR]] seq_cst, align 4
+  // LLVM-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // LLVM-NEXT:   br label %[[CONTINUE_BLK]]
+  // LLVM:      [[CONTINUE_BLK]]:
+  // LLVM-NEXT:   %{{.+}} = load i32, ptr %[[RES_SLOT]], align 4
+
+  // OGCG:        %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
+  // OGCG-NEXT:   %[[ORDER:.+]] = load i32, ptr %{{.+}}, align 4
+  // OGCG-NEXT:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // OGCG-NEXT:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // OGCG-NEXT:     i32 2, label %[[ACQUIRE_BLK]]
+  // OGCG-NEXT:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // OGCG-NEXT:   ]
+  // OGCG:      [[DEFAULT_BLK]]:
+  // OGCG-NEXT:   %[[RES:.+]] = load atomic i32, ptr %[[PTR]] monotonic, align 4
+  // OGCG-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT:.+]], align 4
+  // OGCG-NEXT:   br label %[[CONTINUE_BLK:.+]]
+  // OGCG:      [[ACQUIRE_BLK]]:
+  // OGCG-NEXT:   %[[RES:.+]] = load atomic i32, ptr %[[PTR]] acquire, align 4
+  // OGCG-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // OGCG-NEXT:   br label %[[CONTINUE_BLK]]
+  // OGCG:      [[SEQ_CST_BLK]]:
+  // OGCG-NEXT:   %[[RES:.+]] = load atomic i32, ptr %[[PTR]] seq_cst, align 4
+  // OGCG-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // OGCG-NEXT:   br label %[[CONTINUE_BLK]]
+  // OGCG:      [[CONTINUE_BLK]]:
+  // OGCG-NEXT:   %{{.+}} = load i32, ptr %[[RES_SLOT]], align 4
+}
+
+void atomic_store_dynamic_order(int *ptr, int order) {
+  // CIR-LABEL: atomic_store_dynamic_order
+  // LLVM-LABEL: atomic_store_dynamic_order
+  // OGCG-LABEL: atomic_store_dynamic_order
+
+  __atomic_store_n(ptr, 10, order);
+
+  // CIR:      %[[PTR:.+]] = cir.load align(8) %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+  // CIR-NEXT: %[[ORDER:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:      cir.switch (%[[ORDER]] : !s32i) {
+  // CIR-NEXT:   cir.case(default, []) {
+  // CIR-NEXT:     %[[VALUE:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     cir.store align(4) atomic(relaxed) %[[VALUE]], %[[PTR]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:     cir.break
+  // CIR-NEXT:   }
+  // CIR-NEXT:   cir.case(anyof, [#cir.int<3> : !s32i]) {
+  // CIR-NEXT:     %[[VALUE:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     cir.store align(4) atomic(release) %[[VALUE]], %[[PTR]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:     cir.break
+  // CIR-NEXT:   }
+  // CIR-NEXT:   cir.case(anyof, [#cir.int<5> : !s32i]) {
+  // CIR-NEXT:     %[[VALUE:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     cir.store align(4) atomic(seq_cst) %[[VALUE]], %[[PTR]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:     cir.break
+  // CIR-NEXT:   }
+  // CIR-NEXT:   cir.yield
+  // CIR-NEXT: }
+
+  // LLVM:        %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
+  // LLVM-NEXT:   %[[ORDER:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM:        br label %[[SWITCH_BLK:.+]]
+  // LLVM:      [[SWITCH_BLK]]:
+  // LLVM-NEXT:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // LLVM-NEXT:     i32 3, label %[[RELEASE_BLK:.+]]
+  // LLVM-NEXT:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // LLVM-NEXT:   ]
+  // LLVM:      [[DEFAULT_BLK]]:
+  // LLVM-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM-NEXT:   store atomic i32 %[[VALUE]], ptr %[[PTR]] monotonic, align 4
+  // LLVM-NEXT:   br label %{{.+}}
+  // LLVM:      [[RELEASE_BLK]]:
+  // LLVM-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM-NEXT:   store atomic i32 %[[VALUE]], ptr %[[PTR]] release, align 4
+  // LLVM-NEXT:   br label %{{.+}}
+  // LLVM:      [[SEQ_CST_BLK]]:
+  // LLVM-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM-NEXT:   store atomic i32 %[[VALUE]], ptr %[[PTR]] seq_cst, align 4
+  // LLVM-NEXT:   br label %{{.+}}
+  
+  // OGCG:        %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
+  // OGCG-NEXT:   %[[ORDER:.+]] = load i32, ptr %{{.+}}, align 4
+  // OGCG:        switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // OGCG-NEXT:     i32 3, label %[[RELEASE_BLK:.+]]
+  // OGCG-NEXT:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // OGCG-NEXT:   ]
+  // OGCG:      [[DEFAULT_BLK]]:
+  // OGCG-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // OGCG-NEXT:   store atomic i32 %[[VALUE]], ptr %[[PTR]] monotonic, align 4
+  // OGCG-NEXT:   br label %{{.+}}
+  // OGCG:      [[RELEASE_BLK]]:
+  // OGCG-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // OGCG-NEXT:   store atomic i32 %[[VALUE]], ptr %[[PTR]] release, align 4
+  // OGCG-NEXT:   br label %{{.+}}
+  // OGCG:      [[SEQ_CST_BLK]]:
+  // OGCG-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // OGCG-NEXT:   store atomic i32 %[[VALUE]], ptr %[[PTR]] seq_cst, align 4
+  // OGCG-NEXT:   br label %{{.+}}
+}
+
+int atomic_load_and_store_dynamic_order(int *ptr, int order) {
+  // CIR-LABEL: atomic_load_and_store_dynamic_order
+  // LLVM-LABEL: atomic_load_and_store_dynamic_order
+  // OGCG-LABEL: atomic_load_and_store_dynamic_order
+
+  return __atomic_exchange_n(ptr, 20, order);
+
+  // CIR:      %[[PTR:.+]] = cir.load align(8) %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+  // CIR-NEXT: %[[ORDER:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR:      cir.switch (%[[ORDER]] : !s32i) {
+  // CIR-NEXT:   cir.case(default, []) {
+  // CIR-NEXT:     %[[LIT:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     %[[RES:.+]] = cir.atomic.xchg relaxed %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR-NEXT:     cir.store align(4) %[[RES]], %[[RES_SLOT:.+]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:     cir.break
+  // CIR-NEXT:   }
+  // CIR-NEXT:   cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
+  // CIR-NEXT:     %[[LIT:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     %[[RES:.+]] = cir.atomic.xchg acquire %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR-NEXT:     cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:     cir.break
+  // CIR-NEXT:   }
+  // CIR-NEXT:   cir.case(anyof, [#cir.int<3> : !s32i]) {
+  // CIR-NEXT:     %[[LIT:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     %[[RES:.+]] = cir.atomic.xchg release %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR-NEXT:     cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:     cir.break
+  // CIR-NEXT:   }
+  // CIR-NEXT:   cir.case(anyof, [#cir.int<4> : !s32i]) {
+  // CIR-NEXT:     %[[LIT:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     %[[RES:.+]] = cir.atomic.xchg acq_rel %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR-NEXT:     cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:     cir.break
+  // CIR-NEXT:   }
+  // CIR-NEXT:   cir.case(anyof, [#cir.int<5> : !s32i]) {
+  // CIR-NEXT:     %[[LIT:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT:     %[[RES:.+]] = cir.atomic.xchg seq_cst %[[PTR]], %[[LIT]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
+  // CIR-NEXT:     cir.store align(4) %[[RES]], %[[RES_SLOT]] : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT:     cir.break
+  // CIR-NEXT:   }
+  // CIR-NEXT:   cir.yield
+  // CIR-NEXT: }
+  // CIR-NEXT: %{{.+}} = cir.load align(4) %[[RES_SLOT]] : !cir.ptr<!s32i>, !s32i
+
+  // LLVM:        %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
+  // LLVM-NEXT:   %[[ORDER:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM:        br label %[[SWITCH_BLK:.+]]
+  // LLVM:      [[SWITCH_BLK]]:
+  // LLVM-NEXT:   switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // LLVM-NEXT:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // LLVM-NEXT:     i32 2, label %[[ACQUIRE_BLK]]
+  // LLVM-NEXT:     i32 3, label %[[RELEASE_BLK:.+]]
+  // LLVM-NEXT:     i32 4, label %[[ACQ_REL_BLK:.+]]
+  // LLVM-NEXT:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // LLVM-NEXT:   ]
+  // LLVM:      [[DEFAULT_BLK]]:
+  // LLVM-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM-NEXT:   %[[RES:.+]] = atomicrmw xchg ptr %[[PTR]], i32 %[[VALUE]] monotonic, align 4
+  // LLVM-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT:.+]], align 4
+  // LLVM-NEXT:   br label %[[CONTINUE_BLK:.+]]
+  // LLVM:      [[ACQUIRE_BLK]]:
+  // LLVM-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM-NEXT:   %[[RES:.+]] = atomicrmw xchg ptr %[[PTR]], i32 %[[VALUE]] acquire, align 4
+  // LLVM-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // LLVM-NEXT:   br label %[[CONTINUE_BLK]]
+  // LLVM:      [[RELEASE_BLK]]:
+  // LLVM-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM-NEXT:   %[[RES:.+]] = atomicrmw xchg ptr %[[PTR]], i32 %[[VALUE]] release, align 4
+  // LLVM-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // LLVM-NEXT:   br label %[[CONTINUE_BLK]]
+  // LLVM:      [[ACQ_REL_BLK]]:
+  // LLVM-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM-NEXT:   %[[RES:.+]] = atomicrmw xchg ptr %[[PTR]], i32 %[[VALUE]] acq_rel, align 4
+  // LLVM-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // LLVM-NEXT:   br label %[[CONTINUE_BLK]]
+  // LLVM:      [[SEQ_CST_BLK]]:
+  // LLVM-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // LLVM-NEXT:   %[[RES:.+]] = atomicrmw xchg ptr %[[PTR]], i32 %[[VALUE]] seq_cst, align 4
+  // LLVM-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // LLVM-NEXT:   br label %[[CONTINUE_BLK]]
+  // LLVM:      [[CONTINUE_BLK]]:
+  // LLVM-NEXT:   %{{.+}} = load i32, ptr %[[RES_SLOT]], align 4
+  
+  // OGCG:        %[[PTR:.+]] = load ptr, ptr %{{.+}}, align 8
+  // OGCG-NEXT:   %[[ORDER:.+]] = load i32, ptr %{{.+}}, align 4
+  // OGCG:        switch i32 %[[ORDER]], label %[[DEFAULT_BLK:.+]] [
+  // OGCG-NEXT:     i32 1, label %[[ACQUIRE_BLK:.+]]
+  // OGCG-NEXT:     i32 2, label %[[ACQUIRE_BLK]]
+  // OGCG-NEXT:     i32 3, label %[[RELEASE_BLK:.+]]
+  // OGCG-NEXT:     i32 4, label %[[ACQ_REL_BLK:.+]]
+  // OGCG-NEXT:     i32 5, label %[[SEQ_CST_BLK:.+]]
+  // OGCG-NEXT:   ]
+  // OGCG:      [[DEFAULT_BLK]]:
+  // OGCG-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // OGCG-NEXT:   %[[RES:.+]] = atomicrmw xchg ptr %[[PTR]], i32 %[[VALUE]] monotonic, align 4
+  // OGCG-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT:.+]], align 4
+  // OGCG-NEXT:   br label %[[CONTINUE_BLK:.+]]
+  // OGCG:      [[ACQUIRE_BLK]]:
+  // OGCG-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // OGCG-NEXT:   %[[RES:.+]] = atomicrmw xchg ptr %[[PTR]], i32 %[[VALUE]] acquire, align 4
+  // OGCG-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // OGCG-NEXT:   br label %[[CONTINUE_BLK]]
+  // OGCG:      [[RELEASE_BLK]]:
+  // OGCG-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // OGCG-NEXT:   %[[RES:.+]] = atomicrmw xchg ptr %[[PTR]], i32 %[[VALUE]] release, align 4
+  // OGCG-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // OGCG-NEXT:   br label %[[CONTINUE_BLK]]
+  // OGCG:      [[ACQ_REL_BLK]]:
+  // OGCG-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // OGCG-NEXT:   %[[RES:.+]] = atomicrmw xchg ptr %[[PTR]], i32 %[[VALUE]] acq_rel, align 4
+  // OGCG-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // OGCG-NEXT:   br label %[[CONTINUE_BLK]]
+  // OGCG:      [[SEQ_CST_BLK]]:
+  // OGCG-NEXT:   %[[VALUE:.+]] = load i32, ptr %{{.+}}, align 4
+  // OGCG-NEXT:   %[[RES:.+]] = atomicrmw xchg ptr %[[PTR]], i32 %[[VALUE]] seq_cst, align 4
+  // OGCG-NEXT:   store i32 %[[RES]], ptr %[[RES_SLOT]], align 4
+  // OGCG-NEXT:   br label %[[CONTINUE_BLK]]
+  // OGCG:      [[CONTINUE_BLK]]:
+  // OGCG-NEXT:   %{{.+}} = load i32, ptr %[[RES_SLOT]], align 4
+}
