https://github.com/Pierre-vh created 
https://github.com/llvm/llvm-project/pull/177343

Load monitor operations make more sense as atomic operations: non-atomic
operations cannot be used for inter-thread communication without additional
synchronization.
The previous builtins only worked because the user could override the CPol
bits directly, but that bypasses the memory model and forces the user to
learn the ISA's CPol bit encoding.

Making load monitor an atomic operation has two advantages. First, it rests
on a stronger memory model foundation: we simply lean on the existing rules
for atomic operations. Second, the CPol bits are abstracted away from the
user, which avoids leaking ISA details into the API.

This patch also adds supporting memory model and intrinsic documentation to
AMDGPUUsage.

Solves SWDEV-516398.
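
For reference, a minimal sketch of the intended source-level shape of the new
builtins, mirroring the updated builtins-amdgcn-gfx1250-load-monitor.cl test
(the wrapper function name is illustrative): the CPol immediate is replaced by
a C ABI atomic ordering and a syncscope string literal.

```c
// OpenCL C, gfx1250 target. Illustrative wrapper around the new builtin;
// the call itself matches the updated CodeGen test in this patch.
typedef int v2i __attribute__((ext_vector_type(2)));

v2i load_monitor_acquire_agent(global v2i *inptr)
{
  // The ordering must be valid for a load (relaxed, acquire or seq_cst),
  // and the scope must be a string literal naming a supported memory scope.
  return __builtin_amdgcn_global_atomic_load_monitor_b64(inptr, __ATOMIC_ACQUIRE, "agent");
}
```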

>From fb660332f0da602c840d5606132a5f60ba4713ba Mon Sep 17 00:00:00 2001
From: pvanhout <[email protected]>
Date: Wed, 21 Jan 2026 14:54:22 +0100
Subject: [PATCH] [AMDGPU][GFX12.5] Reimplement monitor load as an atomic
 operation

Load monitor operations make more sense as atomic operations: non-atomic
operations cannot be used for inter-thread communication without additional
synchronization.
The previous builtins only worked because the user could override the CPol
bits directly, but that bypasses the memory model and forces the user to
learn the ISA's CPol bit encoding.

Making load monitor an atomic operation has two advantages. First, it rests
on a stronger memory model foundation: we simply lean on the existing rules
for atomic operations. Second, the CPol bits are abstracted away from the
user, which avoids leaking ISA details into the API.

This patch also adds supporting memory model and intrinsic documentation to
AMDGPUUsage.

Solves SWDEV-516398.
---
 clang/include/clang/Basic/BuiltinsAMDGPU.td   |  12 +-
 clang/include/clang/Sema/SemaAMDGPU.h         |  12 ++
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp   |  55 ++++----
 clang/lib/Sema/SemaAMDGPU.cpp                 |  69 +++++++---
 .../builtins-amdgcn-gfx1250-load-monitor.cl   |  48 +++----
 .../builtins-amdgcn-error-gfx1250-param.cl    |  38 ++++--
 llvm/docs/AMDGPUUsage.rst                     | 108 ++++++++++++---
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      |  21 ++-
 llvm/lib/Target/AMDGPU/AMDGPUGISel.td         |   4 +
 llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td     |   9 ++
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp |  20 +++
 .../Target/AMDGPU/AMDGPURegisterBankInfo.cpp  |  22 ++-
 llvm/lib/Target/AMDGPU/FLATInstructions.td    |  12 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  86 ++++++++----
 llvm/lib/Target/AMDGPU/SIInstructions.td      |  10 ++
 .../llvm.amdgcn.load.monitor.gfx1250.ll       | 128 ++++++++++--------
 16 files changed, 449 insertions(+), 205 deletions(-)

diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.td 
b/clang/include/clang/Basic/BuiltinsAMDGPU.td
index 12ffad305e7c0..3032b387d91ab 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.td
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.td
@@ -707,12 +707,12 @@ def __builtin_amdgcn_s_cluster_barrier : 
AMDGPUBuiltin<"void()", [], "gfx1250-in
 def __builtin_amdgcn_flat_prefetch : AMDGPUBuiltin<"void(void const 
address_space<0> *, _Constant int)", [Const], "vmem-pref-insts">;
 def __builtin_amdgcn_global_prefetch : AMDGPUBuiltin<"void(void const 
address_space<1> *, _Constant int)", [Const], "vmem-pref-insts">;
 
-def __builtin_amdgcn_global_load_monitor_b32 : AMDGPUBuiltin<"int(int 
address_space<1> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_global_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, 
int>(_ExtVector<2, int address_space<1> *>, _Constant int)", [Const], 
"gfx1250-insts">;
-def __builtin_amdgcn_global_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, 
int>(_ExtVector<4, int address_space<1> *>, _Constant int)", [Const], 
"gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b32 : AMDGPUBuiltin<"int(int 
address_space<0> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, 
int>(_ExtVector<2, int address_space<0> *>, _Constant int)", [Const], 
"gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, 
int>(_ExtVector<4, int address_space<0> *>, _Constant int)", [Const], 
"gfx1250-insts">;
+def __builtin_amdgcn_global_atomic_load_monitor_b32 : AMDGPUBuiltin<"int(int 
address_space<1> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_atomic_load_monitor_b64 : 
AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int address_space<1> *>, 
_Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_atomic_load_monitor_b128 : 
AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int address_space<1> *>, 
_Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_atomic_load_monitor_b32 : AMDGPUBuiltin<"int(int 
address_space<0> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_atomic_load_monitor_b64 : 
AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int address_space<0> *>, 
_Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_atomic_load_monitor_b128 : 
AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int address_space<0> *>, 
_Constant int, char const *)", [Const], "gfx1250-insts">;
 def __builtin_amdgcn_cluster_load_b32 : AMDGPUBuiltin<"int(int 
address_space<1> *, _Constant int, int)", [Const], 
"mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b64 : AMDGPUBuiltin<"_ExtVector<2, 
int>(_ExtVector<2, int address_space<1> *>, _Constant int, int)", [Const], 
"mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b128 : AMDGPUBuiltin<"_ExtVector<4, 
int>(_ExtVector<4, int address_space<1> *>, _Constant int, int)", [Const], 
"mcast-load-insts,wavefrontsize32">;
diff --git a/clang/include/clang/Sema/SemaAMDGPU.h 
b/clang/include/clang/Sema/SemaAMDGPU.h
index bac812a9d4fcf..eb6e73dd7322f 100644
--- a/clang/include/clang/Sema/SemaAMDGPU.h
+++ b/clang/include/clang/Sema/SemaAMDGPU.h
@@ -26,7 +26,19 @@ class SemaAMDGPU : public SemaBase {
 
   bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
 
+  /// Emits a diagnostic if the arg at \p ArgIdx of \p TheCall is not a string
+  /// literal. \returns true if a diagnostic was emitted.
+  bool checkStringLiteralArg(CallExpr *TheCall, unsigned ArgIdx);
+
+  /// Emits a diagnostic if the arg at \p ArgIdx of \p TheCall is not atomic
+  /// ordering encoded in the C ABI format, or if the atomic ordering is not
+  /// valid for the operation type as defined by \p MayLoad and \p MayStore.
+  /// \returns true if a diagnostic was emitted.
+  bool checkAtomicOrderingCABIArg(CallExpr *TheCall, unsigned ArgIdx,
+                                  bool MayLoad, bool MayStore);
+
   bool checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore);
+  bool checkAtomicMonitorLoad(CallExpr *TheCall);
 
   bool checkMovDPPFunctionCall(CallExpr *TheCall, unsigned NumArgs,
                                unsigned NumDataArgs);
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp 
b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index a8a5bc348f00c..5c720619ec7db 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -366,6 +366,14 @@ void 
CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
   Inst->setMetadata(LLVMContext::MD_mmra, MMRAMetadata::getMD(Ctx, MMRAs));
 }
 
+static llvm::MetadataAsValue *getStringAsMDValue(llvm::LLVMContext &Ctx,
+                                                 const clang::Expr *E) {
+  StringRef Arg =
+      cast<clang::StringLiteral>(E->IgnoreParenCasts())->getString();
+  llvm::MDNode *MD = llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, Arg)});
+  return llvm::MetadataAsValue::get(Ctx, MD);
+}
+
 static Intrinsic::ID getIntrinsicIDforWaveReduction(unsigned BuiltinID) {
   switch (BuiltinID) {
   default:
@@ -781,40 +789,42 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned 
BuiltinID,
     llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
     return Builder.CreateCall(F, {Addr});
   }
-  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
-  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
-  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
-  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
-  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
-  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128: {
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b128:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b128: {
 
     Intrinsic::ID IID;
     switch (BuiltinID) {
-    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
-      IID = Intrinsic::amdgcn_global_load_monitor_b32;
+    case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b32:
+      IID = Intrinsic::amdgcn_global_atomic_load_monitor_b32;
       break;
-    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
-      IID = Intrinsic::amdgcn_global_load_monitor_b64;
+    case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b64:
+      IID = Intrinsic::amdgcn_global_atomic_load_monitor_b64;
       break;
-    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
-      IID = Intrinsic::amdgcn_global_load_monitor_b128;
+    case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b128:
+      IID = Intrinsic::amdgcn_global_atomic_load_monitor_b128;
       break;
-    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
-      IID = Intrinsic::amdgcn_flat_load_monitor_b32;
+    case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b32:
+      IID = Intrinsic::amdgcn_flat_atomic_load_monitor_b32;
       break;
-    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
-      IID = Intrinsic::amdgcn_flat_load_monitor_b64;
+    case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b64:
+      IID = Intrinsic::amdgcn_flat_atomic_load_monitor_b64;
       break;
-    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128:
-      IID = Intrinsic::amdgcn_flat_load_monitor_b128;
+    case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b128:
+      IID = Intrinsic::amdgcn_flat_atomic_load_monitor_b128;
       break;
     }
 
+    LLVMContext &Ctx = CGM.getLLVMContext();
     llvm::Type *LoadTy = ConvertType(E->getType());
     llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
-    llvm::Value *Val = EmitScalarExpr(E->getArg(1));
+    llvm::Value *AO = EmitScalarExpr(E->getArg(1));
+    llvm::Value *Scope = getStringAsMDValue(Ctx, E->getArg(2));
     llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
-    return Builder.CreateCall(F, {Addr, Val});
+    return Builder.CreateCall(F, {Addr, AO, Scope});
   }
   case AMDGPU::BI__builtin_amdgcn_cluster_load_b32:
   case AMDGPU::BI__builtin_amdgcn_cluster_load_b64:
@@ -876,10 +886,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned 
BuiltinID,
     const unsigned ScopeArg = E->getNumArgs() - 1;
     for (unsigned i = 0; i != ScopeArg; ++i)
       Args.push_back(EmitScalarExpr(E->getArg(i)));
-    StringRef Arg = 
cast<StringLiteral>(E->getArg(ScopeArg)->IgnoreParenCasts())
-                        ->getString();
-    llvm::MDNode *MD = llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, Arg)});
-    Args.push_back(llvm::MetadataAsValue::get(Ctx, MD));
+    Args.push_back(getStringAsMDValue(Ctx, E->getArg(ScopeArg)));
     // Intrinsic is typed based on the pointer AS. Pointer is always the first
     // argument.
     llvm::Function *F = CGM.getIntrinsic(IID, {Args[0]->getType()});
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index 4261e1849133f..a53cadd27a184 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -119,6 +119,13 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned 
BuiltinID,
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_16x8B:
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_8x16B:
     return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/true);
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b128:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b128:
+    return checkAtomicMonitorLoad(TheCall);
   case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f32_i32:
   case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f32_i32:
   case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f16_i32:
@@ -341,22 +348,9 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned 
BuiltinID,
   return false;
 }
 
-bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
-  bool Fail = false;
-
-  // First argument is a global or generic pointer.
-  Expr *PtrArg = TheCall->getArg(0);
-  QualType PtrTy = PtrArg->getType()->getPointeeType();
-  unsigned AS = getASTContext().getTargetAddressSpace(PtrTy.getAddressSpace());
-  if (AS != llvm::AMDGPUAS::FLAT_ADDRESS &&
-      AS != llvm::AMDGPUAS::GLOBAL_ADDRESS) {
-    Fail = true;
-    Diag(TheCall->getBeginLoc(), diag::err_amdgcn_coop_atomic_invalid_as)
-        << PtrArg->getSourceRange();
-  }
-
-  // Check atomic ordering
-  Expr *AtomicOrdArg = TheCall->getArg(IsStore ? 2 : 1);
+bool SemaAMDGPU::checkAtomicOrderingCABIArg(CallExpr *TheCall, unsigned ArgIdx,
+                                            bool MayLoad, bool MayStore) {
+  Expr *AtomicOrdArg = TheCall->getArg(ArgIdx);
   Expr::EvalResult AtomicOrdArgRes;
   if (!AtomicOrdArg->EvaluateAsInt(AtomicOrdArgRes, getASTContext()))
     llvm_unreachable("Intrinsic requires imm for atomic ordering argument!");
@@ -366,22 +360,55 @@ bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr 
*TheCall, bool IsStore) {
   // Atomic ordering cannot be acq_rel in any case, acquire for stores or
   // release for loads.
   if (!llvm::isValidAtomicOrderingCABI((unsigned)Ord) ||
-      (Ord == llvm::AtomicOrderingCABI::acq_rel) ||
-      Ord == (IsStore ? llvm::AtomicOrderingCABI::acquire
-                      : llvm::AtomicOrderingCABI::release)) {
+      (!(MayLoad && MayStore) && (Ord == llvm::AtomicOrderingCABI::acq_rel)) ||
+      (!MayLoad && Ord == llvm::AtomicOrderingCABI::acquire) ||
+      (!MayStore && Ord == llvm::AtomicOrderingCABI::release)) {
     return Diag(AtomicOrdArg->getBeginLoc(),
                 diag::warn_atomic_op_has_invalid_memory_order)
            << 0 << AtomicOrdArg->getSourceRange();
   }
 
-  // Last argument is a string literal
+  return false;
+}
+
+bool SemaAMDGPU::checkStringLiteralArg(CallExpr *TheCall, unsigned ArgIdx) {
   Expr *Arg = TheCall->getArg(TheCall->getNumArgs() - 1);
   if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) {
-    Fail = true;
     Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
         << Arg->getSourceRange();
+    return true;
+  }
+  return false;
+}
+
+bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
+  bool Fail = false;
+
+  // First argument is a global or generic pointer.
+  Expr *PtrArg = TheCall->getArg(0);
+  QualType PtrTy = PtrArg->getType()->getPointeeType();
+  unsigned AS = getASTContext().getTargetAddressSpace(PtrTy.getAddressSpace());
+  if (AS != llvm::AMDGPUAS::FLAT_ADDRESS &&
+      AS != llvm::AMDGPUAS::GLOBAL_ADDRESS) {
+    Fail = true;
+    Diag(TheCall->getBeginLoc(), diag::err_amdgcn_coop_atomic_invalid_as)
+        << PtrArg->getSourceRange();
   }
 
+  // Check atomic ordering
+  Fail |= checkAtomicOrderingCABIArg(
+      TheCall, IsStore ? 2 : 1, /*MayLoad=*/!IsStore, /*MayStore=*/IsStore);
+  // Last argument is the syncscope as a string literal.
+  Fail |= checkStringLiteralArg(TheCall, TheCall->getNumArgs() - 1);
+
+  return Fail;
+}
+
+bool SemaAMDGPU::checkAtomicMonitorLoad(CallExpr *TheCall) {
+  bool Fail = false;
+  Fail |= checkAtomicOrderingCABIArg(TheCall, 1, /*MayLoad=*/true,
+                                     /*MayStore=*/false);
+  Fail |= checkStringLiteralArg(TheCall, 2);
   return Fail;
 }
 
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl 
b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
index f2552d40fa273..efdbfc25714fb 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
@@ -5,62 +5,62 @@
 typedef int    v2i   __attribute__((ext_vector_type(2)));
 typedef int    v4i   __attribute__((ext_vector_type(4)));
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b32(
+// CHECK-GFX1250-LABEL: @test_amdgcn_global_atomic_load_monitor_b32(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 
@llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) [[INPTR:%.*]], i32 1)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 
@llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1) [[INPTR:%.*]], 
i32 0, metadata [[META8:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret i32 [[TMP0]]
 //
-int test_amdgcn_global_load_monitor_b32(global int* inptr)
+int test_amdgcn_global_atomic_load_monitor_b32(global int* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b32(inptr, 1);
+  return __builtin_amdgcn_global_atomic_load_monitor_b32(inptr, 
__ATOMIC_RELAXED, "");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b64(
+// CHECK-GFX1250-LABEL: @test_amdgcn_global_atomic_load_monitor_b64(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> 
@llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) [[INPTR:%.*]], i32 
10)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> 
@llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1) 
[[INPTR:%.*]], i32 2, metadata [[META9:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <2 x i32> [[TMP0]]
 //
-v2i test_amdgcn_global_load_monitor_b64(global v2i* inptr)
+v2i test_amdgcn_global_atomic_load_monitor_b64(global v2i* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b64(inptr, 10);
+  return __builtin_amdgcn_global_atomic_load_monitor_b64(inptr, 
__ATOMIC_ACQUIRE, "agent");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b128(
+// CHECK-GFX1250-LABEL: @test_amdgcn_global_atomic_load_monitor_b128(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> 
@llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) [[INPTR:%.*]], i32 
22)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> 
@llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr addrspace(1) 
[[INPTR:%.*]], i32 2, metadata [[META10:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <4 x i32> [[TMP0]]
 //
-v4i test_amdgcn_global_load_monitor_b128(global v4i* inptr)
+v4i test_amdgcn_global_atomic_load_monitor_b128(global v4i* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b128(inptr, 22);
+  return __builtin_amdgcn_global_atomic_load_monitor_b128(inptr, 
__ATOMIC_ACQUIRE, "workgroup");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b32(
+// CHECK-GFX1250-LABEL: @test_amdgcn_flat_atomic_load_monitor_b32(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 
@llvm.amdgcn.flat.load.monitor.b32.i32(ptr [[INPTR:%.*]], i32 27)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 
@llvm.amdgcn.flat.atomic.load.monitor.b32.i32(ptr [[INPTR:%.*]], i32 0, 
metadata [[META8]])
 // CHECK-GFX1250-NEXT:    ret i32 [[TMP0]]
 //
-int test_amdgcn_flat_load_monitor_b32(int* inptr)
+int test_amdgcn_flat_atomic_load_monitor_b32(int* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b32(inptr, 27);
+  return __builtin_amdgcn_flat_atomic_load_monitor_b32(inptr, 
__ATOMIC_RELAXED, "");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b64(
+// CHECK-GFX1250-LABEL: @test_amdgcn_flat_atomic_load_monitor_b64(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> 
@llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr [[INPTR:%.*]], i32 1)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> 
@llvm.amdgcn.flat.atomic.load.monitor.b64.v2i32(ptr [[INPTR:%.*]], i32 5, 
metadata [[META11:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <2 x i32> [[TMP0]]
 //
-v2i test_amdgcn_flat_load_monitor_b64(v2i* inptr)
+v2i test_amdgcn_flat_atomic_load_monitor_b64(v2i* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b64(inptr, 1);
+  return __builtin_amdgcn_flat_atomic_load_monitor_b64(inptr, 
__ATOMIC_SEQ_CST, "cluster");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b128(
+// CHECK-GFX1250-LABEL: @test_amdgcn_flat_atomic_load_monitor_b128(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> 
@llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr [[INPTR:%.*]], i32 0)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> 
@llvm.amdgcn.flat.atomic.load.monitor.b128.v4i32(ptr [[INPTR:%.*]], i32 0, 
metadata [[META8]])
 // CHECK-GFX1250-NEXT:    ret <4 x i32> [[TMP0]]
 //
-v4i test_amdgcn_flat_load_monitor_b128(v4i* inptr)
+v4i test_amdgcn_flat_atomic_load_monitor_b128(v4i* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b128(inptr, 0);
+  return __builtin_amdgcn_flat_atomic_load_monitor_b128(inptr, 
__ATOMIC_RELAXED, "");
 }
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl 
b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
index da6a03bc93eeb..dfbe5e3b30396 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
@@ -94,15 +94,37 @@ void test_cvt_scale_pk(global half8 *outh8, global bfloat8 
*outy8, uint2 src2,
   *outf16 = __builtin_amdgcn_cvt_scale_pk16_f32_bf6(src3, scale, 16); // 
expected-error {{argument value 16 is outside the valid range [0, 15]}}
 }
 
-void test_amdgcn_load_monitor(global int* b32gaddr, global v2i* b64gaddr, 
global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
-                              global int* b32out, global v2i* b64out, global 
v4i* b128out, int cpol)
+void test_amdgcn_atomic_load_monitor_ao_constant(global int* b32gaddr, global 
v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i 
*b128faddr,
+                              global int* b32out, global v2i* b64out, global 
v4i* b128out, int ao)
 {
-  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, cpol); // 
expected-error {{'__builtin_amdgcn_global_load_monitor_b32' must be a constant 
integer}}
-  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, cpol); // 
expected-error {{'__builtin_amdgcn_global_load_monitor_b64' must be a constant 
integer}}
-  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, cpol); // 
expected-error {{'__builtin_amdgcn_global_load_monitor_b128' must be a constant 
integer}}
-  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, cpol); // 
expected-error {{'__builtin_amdgcn_flat_load_monitor_b32' must be a constant 
integer}}
-  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, cpol); // 
expected-error {{'__builtin_amdgcn_flat_load_monitor_b64' must be a constant 
integer}}
-  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, cpol); // 
expected-error {{'__builtin_amdgcn_flat_load_monitor_b128' must be a constant 
integer}}
+  *b32out  = __builtin_amdgcn_global_atomic_load_monitor_b32(b32gaddr, ao, 
""); // expected-error {{'__builtin_amdgcn_global_atomic_load_monitor_b32' must 
be a constant integer}}
+  *b64out  = __builtin_amdgcn_global_atomic_load_monitor_b64(b64gaddr, ao, 
""); // expected-error {{'__builtin_amdgcn_global_atomic_load_monitor_b64' must 
be a constant integer}}
+  *b128out = __builtin_amdgcn_global_atomic_load_monitor_b128(b128gaddr, ao, 
""); // expected-error {{'__builtin_amdgcn_global_atomic_load_monitor_b128' 
must be a constant integer}}
+  *b32out  = __builtin_amdgcn_flat_atomic_load_monitor_b32(b32faddr, ao, ""); 
// expected-error {{'__builtin_amdgcn_flat_atomic_load_monitor_b32' must be a 
constant integer}}
+  *b64out  = __builtin_amdgcn_flat_atomic_load_monitor_b64(b64faddr, ao, ""); 
// expected-error {{'__builtin_amdgcn_flat_atomic_load_monitor_b64' must be a 
constant integer}}
+  *b128out = __builtin_amdgcn_flat_atomic_load_monitor_b128(b128faddr, ao, 
""); // expected-error {{'__builtin_amdgcn_flat_atomic_load_monitor_b128' must 
be a constant integer}}
+}
+
+void test_amdgcn_atomic_load_monitor_ao_valid(global int* b32gaddr, global 
v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i 
*b128faddr,
+                              global int* b32out, global v2i* b64out, global 
v4i* b128out)
+{
+  *b32out  = __builtin_amdgcn_global_atomic_load_monitor_b32(b32gaddr, 
__ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic 
operation is invalid}}
+  *b64out  = __builtin_amdgcn_global_atomic_load_monitor_b64(b64gaddr, 
__ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic 
operation is invalid}}
+  *b128out = __builtin_amdgcn_global_atomic_load_monitor_b128(b128gaddr, 
__ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic 
operation is invalid}}
+  *b32out  = __builtin_amdgcn_flat_atomic_load_monitor_b32(b32faddr, 
__ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic 
operation is invalid}}
+  *b64out  = __builtin_amdgcn_flat_atomic_load_monitor_b64(b64faddr, 
__ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic 
operation is invalid}}
+  *b128out = __builtin_amdgcn_flat_atomic_load_monitor_b128(b128faddr, 
__ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic 
operation is invalid}}
+}
+
+void test_amdgcn_atomic_load_monitor_scope_literal(global int* b32gaddr, 
global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i 
*b128faddr,
+                              global int* b32out, global v2i* b64out, global 
v4i* b128out, const char* scope)
+{
+  *b32out  = __builtin_amdgcn_global_atomic_load_monitor_b32(b32gaddr, 
__ATOMIC_RELAXED, scope); // expected-error {{expression is not a string 
literal}}
+  *b64out  = __builtin_amdgcn_global_atomic_load_monitor_b64(b64gaddr, 
__ATOMIC_RELAXED, scope); // expected-error {{expression is not a string 
literal}}
+  *b128out = __builtin_amdgcn_global_atomic_load_monitor_b128(b128gaddr, 
__ATOMIC_RELAXED, scope); // expected-error {{expression is not a string 
literal}}
+  *b32out  = __builtin_amdgcn_flat_atomic_load_monitor_b32(b32faddr, 
__ATOMIC_RELAXED, scope); // expected-error {{expression is not a string 
literal}}
+  *b64out  = __builtin_amdgcn_flat_atomic_load_monitor_b64(b64faddr, 
__ATOMIC_RELAXED, scope); // expected-error {{expression is not a string 
literal}}
+  *b128out = __builtin_amdgcn_flat_atomic_load_monitor_b128(b128faddr, 
__ATOMIC_RELAXED, scope); // expected-error {{expression is not a string 
literal}}
 }
 
 void test_amdgcn_cluster_load(global int* addr32, global v2i* addr64, global 
v4i* addr128, global int* b32out, global v2i* b64out, global v4i* b128out, int 
cpol, int mask)
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 994affe0cfa06..588167c004b03 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -1596,6 +1596,33 @@ The AMDGPU backend implements the following LLVM IR 
intrinsics.
                                                    * 1 - Data cache.
 
                                                    Instruction cache 
prefetches are unsafe on invalid address.
+
+  llvm.amdgcn.flat.atomic.load.monitor             Available on GFX12.5 only.
+                                                   Corresponds to 
``flat_load_monitor_b32/64/128`` (``.b32/64/128`` suffixes)
+                                                   instructions.
+                                                   For the purposes of the 
memory model, this is an atomic load operation in
+                                                   the generic (flat) address 
space.
+
+                                                   This intrinsic has 3 
operands:
+
+                                                   * Flat pointer.
+                                                   * :ref:`Load Atomic 
Ordering<amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand>`.
+                                                   * :ref:`Synchronization 
Scope<amdgpu-intrinsics-syncscope-metadata-operand>`.
+                                                     Note that the scope used 
must ensure that the L2 cache will be hit.
+
+  llvm.amdgcn.global.atomic.load.monitor           Available on GFX12.5 only.
+                                                   Corresponds to 
``global_load_monitor_b32/64/128`` (``.b32/64/128`` suffixes)
+                                                   instructions.
+                                                   For the purposes of the 
memory model, this is an atomic load operation in
+                                                   the global address space.
+
+                                                   This intrinsic has 3 
operands:
+
+                                                   * Flat pointer.
+                                                   * :ref:`Load Atomic 
Ordering<amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand>`.
+                                                   * :ref:`Synchronization 
Scope<amdgpu-intrinsics-syncscope-metadata-operand>`.
+                                                     Note that the scope used 
must ensure that the L2 cache will be hit.
+
   ==============================================   
==========================================================
 
 .. TODO::
@@ -1661,28 +1688,64 @@ then this intrinsic causes undefined behavior.
 
 The intrinsics are available for the global (``.p1`` suffix) and generic 
(``.p0`` suffix) address spaces.
 
-The atomic ordering operand (3rd operand for ``.store``, 2nd for ``.load``) is 
an integer that follows the
-C ABI encoding of atomic memory orderings. The supported values are in
-:ref:`the table 
below<amdgpu-cooperative-atomic-intrinsics-atomic-memory-orderings-table>`.
+The 3rd operand for ``.store`` or 2nd for ``.load`` intrinsics is the
+:ref:`atomic ordering<amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand>` 
of the operation.
+
+The last operand of the intrinsic is the
+:ref:`synchronization scope<amdgpu-intrinsics-syncscope-metadata-operand>` of 
the operation.
+
+Intrinsic Operands
+~~~~~~~~~~~~~~~~~~
+
+.. _amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand:
+
+C ABI Atomic Ordering Operand
++++++++++++++++++++++++++++++
+
+Intrinsic operands in this format are always ``i32`` integer constants whose 
value is
+determined by the C ABI encoding of atomic memory orderings. The supported 
values are in
+:ref:`the table below<amdgpu-intrinsics-c-abi-atomic-memory-orderings-table>`.
+
+  .. table:: AMDGPU Intrinsics C ABI Atomic Memory Ordering Values
+    :name: amdgpu-intrinsics-c-abi-atomic-memory-orderings-table
+
+    ========= ================ =================================
+    Value     Atomic Memory    Notes
+              Ordering
+    ========= ================ =================================
+    ``i32 0`` ``relaxed``      The default for unsupported values.
+
+    ``i32 2`` ``acquire``      Only for loads.
 
-  .. table:: AMDGPU Cooperative Atomic Intrinsics Atomic Memory Orderings
-    :name: amdgpu-cooperative-atomic-intrinsics-atomic-memory-orderings-table
+    ``i32 3`` ``release``      Only for stores.
 
-    ====== ================ =================================
-    Value  Atomic Memory    Notes
-           Ordering
-    ====== ================ =================================
-    ``0``  ``relaxed``      The default for unsupported values.
+    ``i32 5`` ``seq_cst``
+    ========= ================ =================================
 
-    ``2``  ``acquire``      Only for ``.load``
+Example:
 
-    ``3``  ``release``      Only for ``.store``
+.. code::
+
+  ; "i32 5" is the atomic ordering operand
+  %0 = tail call i32 @llvm.amdgcn.cooperative.atomic.load.32x4B.p0(ptr %addr, 
i32 5, metadata !0)
 
-    ``5``  ``seq_cst``
-    ====== ================ =================================
+.. _amdgpu-intrinsics-syncscope-metadata-operand:
+
+Syncscope Metadata Operand
+++++++++++++++++++++++++++
 
-The last argument of the intrinsic is the synchronization scope
-as a metadata string, which must be one of the supported :ref:`memory 
scopes<amdgpu-memory-scopes>`.
+Intrinsic operands in this format are metadata strings which must be one of 
the supported
+:ref:`memory scopes<amdgpu-memory-scopes>`.
+The metadata node must be made of a single ``MDString`` at the top level.
+
+Example:
+
+.. code::
+
+  ; "metadata !0" is the syncscope metadata operand.
+  %0 = tail call i32 @llvm.amdgcn.cooperative.atomic.load.32x4B.p0(ptr %addr, 
i32 4, metadata !0)
+
+  !0 = !{ !"agent" }
 
 .. _amdgpu_metadata:
 
@@ -16816,7 +16879,7 @@ For GFX125x:
 
   This section is currently incomplete as work on the compiler is still 
ongoing.
   The following is a non-exhaustive list of unimplemented/undocumented 
features:
-  non-volatile bit code sequences, monitor and wait, globally accessing 
scratch atomics,
+  non-volatile bit code sequences, globally accessing scratch atomics,
   multicast loads, barriers (including split barriers) and cooperative atomics.
   Scalar operations memory model needs more elaboration as well.
 
@@ -16917,6 +16980,17 @@ For GFX125x:
     issued to every address at the same time. They are kept in order with other
     memory operations from the same wave.
 
+* ``global_load_monitor_*`` and ``flat_load_monitor_*`` instructions load
+  data and request that the wave is notified (see ``s_monitor_sleep``) if
+  the L2 cache line that holds the data is evicted, or written to.
+
+  * In order to monitor a cache line in the L2 cache, these instructions must
+    ensure that the L2 cache is always hit by setting the ``SCOPE`` of the 
instruction
+    appropriately.
+  * For non-atomic and atomic code sequences, it is valid to replace
+    ``global_load_b32/64/128`` with a ``global_load_monitor_b32/64/128`` and a
+    ``flat_load_b32/64/128`` with a ``flat_load_monitor_b32/64/128``.
+
 Scalar memory operations are only used to access memory that is proven to not
 change during the execution of the kernel dispatch. This includes constant
 address space and global address space for program scope ``const`` variables.
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td 
b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index a8eba9ed126b7..4e1bbe477d810 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -4204,25 +4204,24 @@ def int_amdgcn_cluster_load_b32         : 
AMDGPUClusterLoad<global_ptr_ty>;
 def int_amdgcn_cluster_load_b64         : AMDGPUClusterLoad<global_ptr_ty>;
 def int_amdgcn_cluster_load_b128        : AMDGPUClusterLoad<global_ptr_ty>;
 
-class AMDGPULoadMonitor<LLVMType ptr_ty>:
+class AMDGPUAtomicLoadMonitor<LLVMType ptr_ty>:
   Intrinsic<
     [llvm_any_ty],
     [ptr_ty,
-     llvm_i32_ty],  // gfx12+ cachepolicy:
-                    //   bits [0-2] = th
-                    //   bits [3-4] = scope
+     llvm_i32_ty,            // C ABI Atomic Ordering ID
+     llvm_metadata_ty],      // syncscope
     [IntrArgMemOnly, IntrReadMem, ReadOnly<ArgIndex<0>>, 
NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<1>>,
      IntrWillReturn, IntrConvergent, IntrNoCallback, IntrNoFree],
     "",
-    [SDNPMemOperand]
+    [SDNPMemOperand, SDNPMayLoad]
   >;
 
-def int_amdgcn_flat_load_monitor_b32    : AMDGPULoadMonitor<flat_ptr_ty>;
-def int_amdgcn_flat_load_monitor_b64    : AMDGPULoadMonitor<flat_ptr_ty>;
-def int_amdgcn_flat_load_monitor_b128   : AMDGPULoadMonitor<flat_ptr_ty>;
-def int_amdgcn_global_load_monitor_b32  : AMDGPULoadMonitor<global_ptr_ty>;
-def int_amdgcn_global_load_monitor_b64  : AMDGPULoadMonitor<global_ptr_ty>;
-def int_amdgcn_global_load_monitor_b128 : AMDGPULoadMonitor<global_ptr_ty>;
+def int_amdgcn_flat_atomic_load_monitor_b32    : 
AMDGPUAtomicLoadMonitor<flat_ptr_ty>;
+def int_amdgcn_flat_atomic_load_monitor_b64    : 
AMDGPUAtomicLoadMonitor<flat_ptr_ty>;
+def int_amdgcn_flat_atomic_load_monitor_b128   : 
AMDGPUAtomicLoadMonitor<flat_ptr_ty>;
+def int_amdgcn_global_atomic_load_monitor_b32  : 
AMDGPUAtomicLoadMonitor<global_ptr_ty>;
+def int_amdgcn_global_atomic_load_monitor_b64  : 
AMDGPUAtomicLoadMonitor<global_ptr_ty>;
+def int_amdgcn_global_atomic_load_monitor_b128 : 
AMDGPUAtomicLoadMonitor<global_ptr_ty>;
 
 /// Emit an addrspacecast without null pointer checking.
 /// Should only be inserted by a pass based on analysis of an addrspacecast's 
src.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td 
b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index 55ce4f1738e37..eff26727f1045 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -329,6 +329,10 @@ def : GINodeEquiv<G_AMDGPU_WHOLE_WAVE_FUNC_SETUP, 
AMDGPUwhole_wave_setup>;
 // G_AMDGPU_WHOLE_WAVE_FUNC_RETURN is simpler than AMDGPUwhole_wave_return,
 // so we don't mark it as equivalent.
 
+def : GINodeEquiv<G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR, 
AMDGPUflat_atomic_load_monitor>;
+def : GINodeEquiv<G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR, 
AMDGPUglobal_atomic_load_monitor>;
+
+
 class GISelSop2Pat <
   SDPatternOperator node,
   Instruction inst,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td 
b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
index 8a43c2da38346..1d109bce030e0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -402,6 +402,15 @@ def AMDGPUExportOp : SDTypeProfile<0, 8, [
 
 ]>;
 
+def AMDGPUflat_atomic_load_monitor : SDNode<
+  "AMDGPUISD::FLAT_ATOMIC_LOAD_MONITOR", SDTLoad,
+  [SDNPHasChain, SDNPMemOperand]
+>;
+
+def AMDGPUglobal_atomic_load_monitor : SDNode<
+  "AMDGPUISD::GLOBAL_ATOMIC_LOAD_MONITOR", SDTLoad,
+  [SDNPHasChain, SDNPMemOperand]
+>;
 
 
//===----------------------------------------------------------------------===//
 // Flow Control Profile Types
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp 
b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 3698b0062b8d0..198e5b41de184 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -8170,6 +8170,26 @@ bool 
AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     B.buildStore(MI.getOperand(2), MI.getOperand(1), **MI.memoperands_begin());
     MI.eraseFromParent();
     return true;
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128:
+    assert(MI.hasOneMemOperand() && "Expected IRTranslator to set MemOp!");
+    B.buildInstr(AMDGPU::G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR)
+        .add(MI.getOperand(0))
+        .add(MI.getOperand(2))
+        .addMemOperand(*MI.memoperands_begin());
+    MI.eraseFromParent();
+    return true;
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b128:
+    assert(MI.hasOneMemOperand() && "Expected IRTranslator to set MemOp!");
+    B.buildInstr(AMDGPU::G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR)
+        .add(MI.getOperand(0))
+        .add(MI.getOperand(2))
+        .addMemOperand(*MI.memoperands_begin());
+    MI.eraseFromParent();
+    return true;
   default: {
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
             AMDGPU::getImageDimIntrinsicInfo(IntrID))
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp 
b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 7470fecd3c03f..5c8babd94b98d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3496,6 +3496,8 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
     executeInWaterfallLoop(B, make_range(Start, End), SGPROperandRegs);
     break;
   }
+  case AMDGPU::G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR:
+  case AMDGPU::G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR:
   case AMDGPU::G_LOAD:
   case AMDGPU::G_ZEXTLOAD:
   case AMDGPU::G_SEXTLOAD: {
@@ -5332,12 +5334,12 @@ AMDGPURegisterBankInfo::getInstrMapping(const 
MachineInstr &MI) const {
     case Intrinsic::amdgcn_ds_load_tr16_b128:
     case Intrinsic::amdgcn_ds_load_tr4_b64:
     case Intrinsic::amdgcn_ds_load_tr6_b96:
-    case Intrinsic::amdgcn_flat_load_monitor_b32:
-    case Intrinsic::amdgcn_flat_load_monitor_b64:
-    case Intrinsic::amdgcn_flat_load_monitor_b128:
-    case Intrinsic::amdgcn_global_load_monitor_b32:
-    case Intrinsic::amdgcn_global_load_monitor_b64:
-    case Intrinsic::amdgcn_global_load_monitor_b128:
+    // case Intrinsic::amdgcn_flat_load_monitor_b32:
+    // case Intrinsic::amdgcn_flat_load_monitor_b64:
+    // case Intrinsic::amdgcn_flat_load_monitor_b128:
+    // case Intrinsic::amdgcn_global_load_monitor_b32:
+    // case Intrinsic::amdgcn_global_load_monitor_b64:
+    // case Intrinsic::amdgcn_global_load_monitor_b128:
     case Intrinsic::amdgcn_ds_read_tr4_b64:
     case Intrinsic::amdgcn_ds_read_tr6_b96:
     case Intrinsic::amdgcn_ds_read_tr8_b64:
@@ -5752,6 +5754,14 @@ AMDGPURegisterBankInfo::getInstrMapping(const 
MachineInstr &MI) const {
   case AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_RETURN:
     OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
     break;
+  case AMDGPU::G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR:
+  case AMDGPU::G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR: {
+    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
+    unsigned PtrSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
+    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
+    OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, PtrSize);
+    break;
+  }
   }
 
   return getInstructionMapping(/*ID*/1, /*Cost*/1,
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td 
b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 05594a7a7d56b..7aee287eb7649 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -2389,13 +2389,13 @@ let WaveSizePredicate = isWave32,  OtherPredicates = 
[HasTransposeLoadF4F6Insts]
 }
 
 let OtherPredicates = [isGFX125xOnly] in {
-  def  : FlatLoadPat_CPOL <FLAT_LOAD_MONITOR_B32,  
int_amdgcn_flat_load_monitor_b32,  i32>;
-  def  : FlatLoadPat_CPOL <FLAT_LOAD_MONITOR_B64,  
int_amdgcn_flat_load_monitor_b64,  v2i32>;
-  def  : FlatLoadPat_CPOL <FLAT_LOAD_MONITOR_B128, 
int_amdgcn_flat_load_monitor_b128, v4i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B32,  AMDGPUflat_atomic_load_monitor,  
i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B64,  AMDGPUflat_atomic_load_monitor,  
v2i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B128, AMDGPUflat_atomic_load_monitor, 
v4i32>;
 
-  defm : GlobalFLATLoadPats_CPOL <GLOBAL_LOAD_MONITOR_B32,  
int_amdgcn_global_load_monitor_b32,  i32>;
-  defm : GlobalFLATLoadPats_CPOL <GLOBAL_LOAD_MONITOR_B64,  
int_amdgcn_global_load_monitor_b64,  v2i32>;
-  defm : GlobalFLATLoadPats_CPOL <GLOBAL_LOAD_MONITOR_B128, 
int_amdgcn_global_load_monitor_b128, v4i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B32,  
AMDGPUglobal_atomic_load_monitor,  i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B64,  
AMDGPUglobal_atomic_load_monitor,  v2i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B128, 
AMDGPUglobal_atomic_load_monitor, v4i32>;
 } // End SubtargetPredicate = isGFX125xOnly
 
 let OtherPredicates = [isGFX1250Plus] in {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp 
b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ae5e9fa2cb695..58068c0af77f5 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1273,51 +1273,54 @@ static unsigned getIntrMemWidth(unsigned IntrID) {
   case Intrinsic::amdgcn_global_store_async_from_lds_b32:
   case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
   case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
     return 32;
   case Intrinsic::amdgcn_global_load_async_to_lds_b64:
   case Intrinsic::amdgcn_cluster_load_async_to_lds_b64:
   case Intrinsic::amdgcn_global_store_async_from_lds_b64:
   case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
   case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
     return 64;
   case Intrinsic::amdgcn_global_load_async_to_lds_b128:
   case Intrinsic::amdgcn_cluster_load_async_to_lds_b128:
   case Intrinsic::amdgcn_global_store_async_from_lds_b128:
   case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
   case Intrinsic::amdgcn_cooperative_atomic_store_8x16B:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b128:
     return 128;
   default:
     llvm_unreachable("Unknown width");
   }
 }
 
-static void getCoopAtomicOperandsInfo(const CallBase &CI, bool IsLoad,
-                                      TargetLoweringBase::IntrinsicInfo &Info) 
{
-  Value *OrderingArg = CI.getArgOperand(IsLoad ? 1 : 2);
+static AtomicOrdering parseAtomicOrderingCABIArg(const CallBase &CI,
+                                                 unsigned ArgIdx) {
+  Value *OrderingArg = CI.getArgOperand(ArgIdx);
   unsigned Ord = cast<ConstantInt>(OrderingArg)->getZExtValue();
   switch (AtomicOrderingCABI(Ord)) {
   case AtomicOrderingCABI::acquire:
-    Info.order = AtomicOrdering::Acquire;
+    return AtomicOrdering::Acquire;
     break;
   case AtomicOrderingCABI::release:
-    Info.order = AtomicOrdering::Release;
+    return AtomicOrdering::Release;
     break;
   case AtomicOrderingCABI::seq_cst:
-    Info.order = AtomicOrdering::SequentiallyConsistent;
+    return AtomicOrdering::SequentiallyConsistent;
     break;
   default:
-    Info.order = AtomicOrdering::Monotonic;
-    break;
+    return AtomicOrdering::Monotonic;
   }
+}
 
-  Info.flags =
-      (IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore);
-  Info.flags |= MOCooperative;
-
+static unsigned parseSyncscopeMDArg(const CallBase &CI, unsigned ArgIdx) {
   MDNode *ScopeMD = cast<MDNode>(
-      cast<MetadataAsValue>(CI.getArgOperand(IsLoad ? 2 : 3))->getMetadata());
+      cast<MetadataAsValue>(CI.getArgOperand(ArgIdx))->getMetadata());
   StringRef Scope = cast<MDString>(ScopeMD->getOperand(0))->getString();
-  Info.ssid = CI.getContext().getOrInsertSyncScopeID(Scope);
+  return CI.getContext().getOrInsertSyncScopeID(Scope);
 }
 
 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
@@ -1531,12 +1534,6 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo 
&Info,
                   MachineMemOperand::MOVolatile;
     return true;
   }
-  case Intrinsic::amdgcn_flat_load_monitor_b32:
-  case Intrinsic::amdgcn_flat_load_monitor_b64:
-  case Intrinsic::amdgcn_flat_load_monitor_b128:
-  case Intrinsic::amdgcn_global_load_monitor_b32:
-  case Intrinsic::amdgcn_global_load_monitor_b64:
-  case Intrinsic::amdgcn_global_load_monitor_b128:
   case Intrinsic::amdgcn_cluster_load_b32:
   case Intrinsic::amdgcn_cluster_load_b64:
   case Intrinsic::amdgcn_cluster_load_b128:
@@ -1559,6 +1556,21 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo 
&Info,
     Info.flags |= MachineMemOperand::MOLoad;
     return true;
   }
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b128: {
+    Info.opc = ISD::INTRINSIC_W_CHAIN;
+    Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
+    Info.ptrVal = CI.getOperand(0);
+    Info.align.reset();
+    Info.flags = MachineMemOperand::MOLoad;
+    Info.order = parseAtomicOrderingCABIArg(CI, 1);
+    Info.ssid = parseSyncscopeMDArg(CI, 2);
+    return true;
+  }
   case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
   case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
   case Intrinsic::amdgcn_cooperative_atomic_load_8x16B: {
@@ -1566,7 +1578,9 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo 
&Info,
     Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
     Info.ptrVal = CI.getOperand(0);
     Info.align.reset();
-    getCoopAtomicOperandsInfo(CI, /*IsLoad=*/true, Info);
+    Info.flags = (MachineMemOperand::MOLoad | MOCooperative);
+    Info.order = parseAtomicOrderingCABIArg(CI, 1);
+    Info.ssid = parseSyncscopeMDArg(CI, 2);
     return true;
   }
   case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
@@ -1576,7 +1590,9 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo 
&Info,
     Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
     Info.ptrVal = CI.getArgOperand(0);
     Info.align.reset();
-    getCoopAtomicOperandsInfo(CI, /*IsLoad=*/false, Info);
+    Info.flags = (MachineMemOperand::MOStore | MOCooperative);
+    Info.order = parseAtomicOrderingCABIArg(CI, 2);
+    Info.ssid = parseSyncscopeMDArg(CI, 3);
     return true;
   }
   case Intrinsic::amdgcn_ds_gws_init:
@@ -1715,15 +1731,9 @@ bool SITargetLowering::getAddrModeArguments(const 
IntrinsicInst *II,
   case Intrinsic::amdgcn_ds_atomic_barrier_arrive_rtn_b64:
   case Intrinsic::amdgcn_flat_atomic_fmax_num:
   case Intrinsic::amdgcn_flat_atomic_fmin_num:
-  case Intrinsic::amdgcn_flat_load_monitor_b128:
-  case Intrinsic::amdgcn_flat_load_monitor_b32:
-  case Intrinsic::amdgcn_flat_load_monitor_b64:
   case Intrinsic::amdgcn_global_atomic_fmax_num:
   case Intrinsic::amdgcn_global_atomic_fmin_num:
   case Intrinsic::amdgcn_global_atomic_ordered_add_b64:
-  case Intrinsic::amdgcn_global_load_monitor_b128:
-  case Intrinsic::amdgcn_global_load_monitor_b32:
-  case Intrinsic::amdgcn_global_load_monitor_b64:
   case Intrinsic::amdgcn_global_load_tr_b64:
   case Intrinsic::amdgcn_global_load_tr_b128:
   case Intrinsic::amdgcn_global_load_tr4_b64:
@@ -11022,6 +11032,26 @@ SDValue 
SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     return DAG.getAtomicLoad(ISD::NON_EXTLOAD, DL, MII->getMemoryVT(), VT,
                              Chain, Ptr, MII->getMemOperand());
   }
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128: {
+    MemIntrinsicSDNode *MII = cast<MemIntrinsicSDNode>(Op);
+    SDValue Chain = Op->getOperand(0);
+    SDValue Ptr = Op->getOperand(2);
+    return DAG.getMemIntrinsicNode(AMDGPUISD::FLAT_ATOMIC_LOAD_MONITOR, DL,
+                                   Op->getVTList(), {Chain, Ptr},
+                                   MII->getMemoryVT(), MII->getMemOperand());
+  }
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b128: {
+    MemIntrinsicSDNode *MII = cast<MemIntrinsicSDNode>(Op);
+    SDValue Chain = Op->getOperand(0);
+    SDValue Ptr = Op->getOperand(2);
+    return DAG.getMemIntrinsicNode(AMDGPUISD::GLOBAL_ATOMIC_LOAD_MONITOR, DL,
+                                   Op->getVTList(), {Chain, Ptr},
+                                   MII->getMemoryVT(), MII->getMemOperand());
+  }
   default:
 
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td 
b/llvm/lib/Target/AMDGPU/SIInstructions.td
index e06bc912113a8..5ab17dbfdefd3 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -4828,6 +4828,16 @@ def G_AMDGPU_READANYLANE : AMDGPUGenericInstruction {
   let hasSideEffects = 0;
 }
 
+class AtomicLoadMonitorInstruction : AMDGPUGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins ptype1:$ptr);
+  let hasSideEffects = 0;
+  let mayLoad = 1;
+}
+
+def G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR : AtomicLoadMonitorInstruction;
+def G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR : AtomicLoadMonitorInstruction;
+
 
//============================================================================//
 // Dummy Instructions
 
//============================================================================//
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll 
b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
index 910c55a041ede..7c2a78a4676db 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
@@ -2,77 +2,82 @@
 ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck 
-check-prefixes=GFX1250,GFX1250-SDAG %s
 ; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck 
-check-prefixes=GFX1250,GFX1250-GISEL %s
 
-declare i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1), i32)
-declare <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1), 
i32)
-declare <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr 
addrspace(1), i32)
-declare i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr, i32)
-declare <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr, i32)
-declare <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr, i32)
-
-define amdgpu_ps void @global_load_monitor_b32_vaddr(ptr addrspace(1) %addr, 
ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b32_vaddr:
+declare i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1), 
i32, metadata)
+declare <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr 
addrspace(1), i32, metadata)
+declare <4 x i32> @llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr 
addrspace(1), i32, metadata)
+declare i32 @llvm.amdgcn.flat.atomic.load.monitor.b32.i32(ptr, i32, metadata)
+declare <2 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b64.v2i32(ptr, i32, 
metadata)
+declare <4 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b128.v4i32(ptr, i32, 
metadata)
+
+
+define amdgpu_ps void @global_atomic_load_monitor_b32_vaddr_relaxed_sys(ptr 
addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b32_vaddr_relaxed_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b32 v0, v[0:1], off offset:32 
th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b32 v0, v[0:1], off offset:32 
scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) 
%gep, i32 1)
+  %val = call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr 
addrspace(1) %gep, i32 0, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b32_saddr(ptr addrspace(1) inreg 
%addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b32_saddr:
+define amdgpu_ps void @global_atomic_load_monitor_b32_saddr_relaxed_sys(ptr 
addrspace(1) inreg %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b32_saddr_relaxed_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] offset:32 
th:TH_LOAD_HT scope:SCOPE_SE
+; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] offset:32 
scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[0:1], v2, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) 
%gep, i32 10)
+  %val = call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr 
addrspace(1) %gep, i32 0, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_vaddr(ptr addrspace(1) %addr, 
ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b64_vaddr:
+define amdgpu_ps void @global_atomic_load_monitor_b64_vaddr_acquire_agent(ptr 
addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b64_vaddr_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b64 v[0:1], v[0:1], off offset:32 
th:TH_LOAD_NT_HT scope:SCOPE_DEV
+; GFX1250-NEXT:    global_load_monitor_b64 v[0:1], v[0:1], off offset:32 
scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr 
addrspace(1) %gep, i32 22)
+  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr 
addrspace(1) %gep, i32 2, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_saddr(ptr addrspace(1) inreg 
%addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b64_saddr:
+define amdgpu_ps void @global_atomic_load_monitor_b64_saddr_acquire_agent(ptr 
addrspace(1) inreg %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b64_saddr_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] offset:32 
th:TH_LOAD_BYPASS scope:SCOPE_SYS
+; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] offset:32 
scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr 
addrspace(1) %gep, i32 27)
+  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr 
addrspace(1) %gep, i32 2, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b128_vaddr(ptr addrspace(1) %addr, 
ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b128_vaddr:
+define amdgpu_ps void 
@global_atomic_load_monitor_b128_vaddr_seq_cst_workgroup(ptr addrspace(1) 
%addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b128_vaddr_seq_cst_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    global_load_monitor_b128 v[4:7], v[0:1], off offset:32
@@ -81,122 +86,137 @@ define amdgpu_ps void @global_load_monitor_b128_vaddr(ptr 
addrspace(1) %addr, pt
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr 
addrspace(1) %gep, i32 0)
+  %val = call <4 x i32> @llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr 
addrspace(1) %gep, i32 5, metadata !2)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b128_saddr(ptr addrspace(1) inreg 
%addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b128_saddr:
+define amdgpu_ps void 
@global_atomic_load_monitor_b128_saddr_seq_cst_workgroup(ptr addrspace(1) inreg 
%addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b128_saddr_seq_cst_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1250-NEXT:    global_load_monitor_b128 v[2:5], v2, s[0:1] offset:32 
th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b128 v[2:5], v2, s[0:1] offset:32
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b128 v[0:1], v[2:5], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr 
addrspace(1) %gep, i32 1)
+  %val = call <4 x i32> @llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr 
addrspace(1) %gep, i32 5, metadata !2)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_load_monitor_b32(ptr %addr, ptr addrspace(1) %use) 
{
-; GFX1250-LABEL: flat_load_monitor_b32:
+define amdgpu_ps void @flat_atomic_load_monitor_b32_seq_cst_sys(ptr %addr, ptr 
addrspace(1) %use) {
+; GFX1250-LABEL: flat_atomic_load_monitor_b32_seq_cst_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    flat_load_monitor_b32 v0, v[0:1] offset:32 th:TH_LOAD_HT 
scope:SCOPE_SE
+; GFX1250-NEXT:    flat_load_monitor_b32 v0, v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr addrspace(0) 
%gep, i32 10)
+  %val = call i32 @llvm.amdgcn.flat.atomic.load.monitor.b32.i32(ptr 
addrspace(0) %gep, i32 5, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_load_monitor_b64(ptr %addr, ptr addrspace(1) %use) 
{
-; GFX1250-LABEL: flat_load_monitor_b64:
+define amdgpu_ps void @flat_atomic_load_monitor_b64_seq_cst_agent(ptr %addr, 
ptr addrspace(1) %use) {
+; GFX1250-LABEL: flat_atomic_load_monitor_b64_seq_cst_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    flat_load_monitor_b64 v[0:1], v[0:1] offset:32 
th:TH_LOAD_NT_HT scope:SCOPE_DEV
+; GFX1250-NEXT:    flat_load_monitor_b64 v[0:1], v[0:1] offset:32 
scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr 
addrspace(0) %gep, i32 22)
+  %val = call <2 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b64.v2i32(ptr 
addrspace(0) %gep, i32 5, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_load_monitor_b128(ptr %addr, ptr addrspace(1) 
%use) {
-; GFX1250-LABEL: flat_load_monitor_b128:
+define amdgpu_ps void @flat_atomic_load_monitor_b128_acquire_sys(ptr %addr, 
ptr addrspace(1) %use) {
+; GFX1250-LABEL: flat_atomic_load_monitor_b128_acquire_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    flat_load_monitor_b128 v[4:7], v[0:1] offset:32 
th:TH_LOAD_BYPASS scope:SCOPE_SYS
+; GFX1250-NEXT:    flat_load_monitor_b128 v[4:7], v[0:1] offset:32 
scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b128 v[2:3], v[4:7], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr 
addrspace(0) %gep, i32 27)
+  %val = call <4 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b128.v4i32(ptr 
addrspace(0) %gep, i32 2, metadata !0)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b32_saddr_scale_offset(ptr 
addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_load_monitor_b32_saddr_scale_offset:
+define amdgpu_ps void 
@global_atomic_load_monitor_b32_saddr_scale_offset_acquire_agent(ptr 
addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: 
global_atomic_load_monitor_b32_saddr_scale_offset_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] scale_offset 
th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] scale_offset 
scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[0:1], v2, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i32, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) 
%gep, i32 1)
+  %val = call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr 
addrspace(1) %gep, i32 2, metadata !1)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_saddr_scale_offset(ptr 
addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_load_monitor_b64_saddr_scale_offset:
+define amdgpu_ps void 
@global_atomic_load_monitor_b64_saddr_scale_offset_acquire_workgroup(ptr 
addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: 
global_atomic_load_monitor_b64_saddr_scale_offset_acquire_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] scale_offset 
th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] scale_offset
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i64, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr 
addrspace(1) %gep, i32 1)
+  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr 
addrspace(1) %gep, i32 2, metadata !2)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_saddr_no_scale_offset(ptr 
addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_load_monitor_b64_saddr_no_scale_offset:
+define amdgpu_ps void 
@global_atomic_load_monitor_b64_saddr_no_scale_offset_seq_cst_sys(ptr 
addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: 
global_atomic_load_monitor_b64_saddr_no_scale_offset_seq_cst_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX1250-NEXT:    v_lshl_add_u64 v[2:3], v[2:3], 2, s[0:1]
-; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v[2:3], off th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v[2:3], off scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i32, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr 
addrspace(1) %gep, i32 1)
+  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr 
addrspace(1) %gep, i32 5, metadata !0)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
+
+!0 = !{ !"" }
+!1 = !{ !"agent" }
+!2 = !{ !"workgroup" }
+
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add 
tests below this line:
 ; GFX1250-GISEL: {{.*}}
 ; GFX1250-SDAG: {{.*}}
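
For reference only (not part of the patch): a minimal usage sketch of the new intrinsic as exercised by the test above. Judging from the test names, the i32 ordering operand appears to follow the C11 __ATOMIC_* numbering (0 = relaxed, 2 = acquire, 5 = seq_cst), and the metadata string names the synchronization scope, with the empty string meaning system scope; this reading is inferred from the test, not normative. %ptr and %out below are assumed to be valid global pointers supplied by the caller.

  ; Acquire-ordered monitor load at agent scope, then publish the result.
  %val = call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1) %ptr, i32 2, metadata !1)
  store i32 %val, ptr addrspace(1) %out

  !1 = !{ !"agent" }   ; use !{ !"" } for system scope, !{ !"workgroup" } for workgroup scope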
