https://github.com/krzysz00 updated 
https://github.com/llvm/llvm-project/pull/137425

>From bcb72e3d8cb2dcdb97199d32797306c5807c8442 Mon Sep 17 00:00:00 2001
From: Krzysztof Drewniak <krzysztof.drewn...@amd.com>
Date: Sat, 26 Apr 2025 00:20:22 +0000
Subject: [PATCH 1/4] [AMDGPU] Add a new amdgcn.load.to.lds intrinsic

This PR adds an amdgcn_load_to_lds intrinsic that abstracts over loads
to LDS from global (address space 1) pointers and buffer fat
pointers (address space 7), since they use the same API and "gather
from a pointer to LDS" is something of an abstract operation.

This commit adds the intrinsic and its lowerings for addrspaces 1 and
7, and updates the MLIR wrappers to use it (loosening up the
restrictions on loads to LDS along the way to match the ground truth
from target features).

It also plumbs the intrinsic through to clang.
---
 clang/include/clang/Basic/BuiltinsAMDGPU.def  |   1 +
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp   |   4 +
 clang/lib/Sema/SemaAMDGPU.cpp                 |   1 +
 .../CodeGenOpenCL/builtins-amdgcn-gfx950.cl   |  30 +++
 .../builtins-amdgcn-load-to-lds.cl            |  60 +++++
 llvm/docs/ReleaseNotes.md                     |   8 +
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      |  21 ++
 .../AMDGPU/AMDGPUInstructionSelector.cpp      |   5 +
 .../AMDGPU/AMDGPULowerBufferFatPointers.cpp   |  20 ++
 .../Target/AMDGPU/AMDGPURegisterBankInfo.cpp  |   2 +
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   8 +-
 .../AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll  |  75 ++++++
 .../CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.ll | 220 ++++++++++++++++++
 .../lower-buffer-fat-pointers-mem-transfer.ll |  18 ++
 mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td |  12 +-
 mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td  |  35 ++-
 .../AMDGPUToROCDL/AMDGPUToROCDL.cpp           |  15 +-
 mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp  |  21 +-
 .../Conversion/AMDGPUToROCDL/load_lds.mlir    |  67 ++++--
 mlir/test/Dialect/LLVMIR/rocdl.mlir           |  17 +-
 mlir/test/Target/LLVMIR/rocdl.mlir            |  11 +-
 21 files changed, 598 insertions(+), 53 deletions(-)
 create mode 100644 clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl
 create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.ll

diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def 
b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index 39fef9e4601f8..730fd15913c11 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -257,6 +257,7 @@ TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_v2bf16, 
"V2sV2s*0V2s", "t", "at
 TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2bf16, "V2sV2s*1V2s", "t", 
"atomic-global-pk-add-bf16-inst")
 TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2bf16, "V2sV2s*3V2s", "t", 
"atomic-ds-pk-add-16-insts")
 TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2f16, "V2hV2h*3V2h", "t", 
"atomic-ds-pk-add-16-insts")
+TARGET_BUILTIN(__builtin_amdgcn_load_to_lds, "vv*v*3IUiIiIUi", "t", 
"vmem-to-lds-load-insts")
 TARGET_BUILTIN(__builtin_amdgcn_global_load_lds, "vv*1v*3IUiIiIUi", "t", 
"vmem-to-lds-load-insts")
 
 
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp 
b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index ad012d98635ff..a32ef1c2a5a12 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -564,6 +564,10 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned 
BuiltinID,
     llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
     return Builder.CreateCall(F, {Addr});
   }
+  case AMDGPU::BI__builtin_amdgcn_load_to_lds: {
+    return emitBuiltinWithOneOverloadedType<5>(*this, E,
+                                               Intrinsic::amdgcn_load_to_lds);
+  }
   case AMDGPU::BI__builtin_amdgcn_get_fpenv: {
     Function *F = CGM.getIntrinsic(Intrinsic::get_fpenv,
                                    {llvm::Type::getInt64Ty(getLLVMContext())});
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index a6366aceec2a6..e6414a623b929 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -36,6 +36,7 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned 
BuiltinID,
 
   switch (BuiltinID) {
   case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_load_lds:
+  case AMDGPU::BI__builtin_amdgcn_load_to_lds:
   case AMDGPU::BI__builtin_amdgcn_global_load_lds: {
     constexpr const int SizeIdx = 2;
     llvm::APSInt Size;
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl 
b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
index 8251d6c213e3d..4b73347ac8155 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
@@ -1766,6 +1766,36 @@ void test_cvt_sr_f16_f32(global half2 *out, float src, 
uint seed)
   *out = __builtin_amdgcn_cvt_sr_f16_f32(*out, src, seed, 1);
 }
 
+// CHECK-LABEL: @test_load_to_lds_96(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca ptr addrspace(1), align 8, 
addrspace(5)
+// CHECK-NEXT:    [[DST_ADDR:%.*]] = alloca ptr addrspace(3), align 4, 
addrspace(5)
+// CHECK-NEXT:    store ptr addrspace(1) [[SRC:%.*]], ptr addrspace(5) 
[[SRC_ADDR]], align 8
+// CHECK-NEXT:    store ptr addrspace(3) [[DST:%.*]], ptr addrspace(5) 
[[DST_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr addrspace(5) 
[[SRC_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr addrspace(5) 
[[DST_ADDR]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) 
[[TMP0]], ptr addrspace(3) [[TMP1]], i32 12, i32 0, i32 0)
+// CHECK-NEXT:    ret void
+//
+void test_load_to_lds_96(global void* src, local void *dst) {
+  __builtin_amdgcn_global_load_lds(src, dst, /*size=*/12, /*offset=*/0, 
/*aux=*/0);
+}
+
+// CHECK-LABEL: @test_load_to_lds_128(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca ptr addrspace(1), align 8, 
addrspace(5)
+// CHECK-NEXT:    [[DST_ADDR:%.*]] = alloca ptr addrspace(3), align 4, 
addrspace(5)
+// CHECK-NEXT:    store ptr addrspace(1) [[SRC:%.*]], ptr addrspace(5) 
[[SRC_ADDR]], align 8
+// CHECK-NEXT:    store ptr addrspace(3) [[DST:%.*]], ptr addrspace(5) 
[[DST_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr addrspace(5) 
[[SRC_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr addrspace(5) 
[[DST_ADDR]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) 
[[TMP0]], ptr addrspace(3) [[TMP1]], i32 16, i32 0, i32 0)
+// CHECK-NEXT:    ret void
+//
+void test_load_to_lds_128(global void* src, local void *dst) {
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/16, /*offset=*/0, /*aux=*/0);
+}
+
 // CHECK-LABEL: @test_global_load_lds_96(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca ptr addrspace(1), align 8, 
addrspace(5)
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl 
b/clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl
new file mode 100644
index 0000000000000..6cdedb33bdd80
--- /dev/null
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl
@@ -0,0 +1,60 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown 
-target-cpu gfx900 -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown 
-target-cpu gfx942 -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown 
-target-cpu gfx1010 -emit-llvm -o - %s | FileCheck %s
+// REQUIRES: amdgpu-registered-target
+
+typedef unsigned int u32;
+typedef unsigned short u16;
+typedef unsigned char u8;
+
+// CHECK-LABEL: @test_load_to_lds_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca ptr addrspace(1), align 8, 
addrspace(5)
+// CHECK-NEXT:    [[DST_ADDR:%.*]] = alloca ptr addrspace(3), align 4, 
addrspace(5)
+// CHECK-NEXT:    [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[SRC_ADDR]] to ptr
+// CHECK-NEXT:    [[DST_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[DST_ADDR]] to ptr
+// CHECK-NEXT:    store ptr addrspace(1) [[SRC:%.*]], ptr [[SRC_ADDR_ASCAST]], 
align 8
+// CHECK-NEXT:    store ptr addrspace(3) [[DST:%.*]], ptr [[DST_ADDR_ASCAST]], 
align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) 
[[TMP0]], ptr addrspace(3) [[TMP1]], i32 4, i32 0, i32 0)
+// CHECK-NEXT:    ret void
+//
+void test_load_to_lds_u32(global u32* src, local u32 *dst) {
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/4, /*offset=*/0, /*aux=*/0);
+}
+
+// CHECK-LABEL: @test_load_to_lds_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca ptr addrspace(1), align 8, 
addrspace(5)
+// CHECK-NEXT:    [[DST_ADDR:%.*]] = alloca ptr addrspace(3), align 4, 
addrspace(5)
+// CHECK-NEXT:    [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[SRC_ADDR]] to ptr
+// CHECK-NEXT:    [[DST_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[DST_ADDR]] to ptr
+// CHECK-NEXT:    store ptr addrspace(1) [[SRC:%.*]], ptr [[SRC_ADDR_ASCAST]], 
align 8
+// CHECK-NEXT:    store ptr addrspace(3) [[DST:%.*]], ptr [[DST_ADDR_ASCAST]], 
align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) 
[[TMP0]], ptr addrspace(3) [[TMP1]], i32 2, i32 0, i32 0)
+// CHECK-NEXT:    ret void
+//
+void test_load_to_lds_u16(global u16* src, local u16 *dst) {
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/2, /*offset=*/0, /*aux=*/0);
+}
+
+// CHECK-LABEL: @test_load_to_lds_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca ptr addrspace(1), align 8, 
addrspace(5)
+// CHECK-NEXT:    [[DST_ADDR:%.*]] = alloca ptr addrspace(3), align 4, 
addrspace(5)
+// CHECK-NEXT:    [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[SRC_ADDR]] to ptr
+// CHECK-NEXT:    [[DST_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[DST_ADDR]] to ptr
+// CHECK-NEXT:    store ptr addrspace(1) [[SRC:%.*]], ptr [[SRC_ADDR_ASCAST]], 
align 8
+// CHECK-NEXT:    store ptr addrspace(3) [[DST:%.*]], ptr [[DST_ADDR_ASCAST]], 
align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) 
[[TMP0]], ptr addrspace(3) [[TMP1]], i32 1, i32 0, i32 0)
+// CHECK-NEXT:    ret void
+//
+void test_load_to_lds_u8(global u8* src, local u8 *dst) {
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/1, /*offset=*/0, /*aux=*/0);
+}
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index aded1bd67ed76..e43069cda87d7 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -103,6 +103,14 @@ Changes to the AMDGPU Backend
 
 * Bump the default `.amdhsa_code_object_version` to 6. ROCm 6.3 is required to 
run any program compiled with COV6.
 
+* Add a new `amdgcn.load.to.lds` intrinsic that wraps the existing 
global.load.lds
+intrinsic and has the same semantics. This intrinsic allows using buffer fat 
pointers
+(`ptr addrspace(7)`) as arguments, allowing loads to LDS from these pointers 
to be
+represented in the IR without needing to use buffer resource intrinsics 
directly.
+This intrinsic is exposed to Clang as `__builtin_amdgcn_load_to_lds`, though
+buffer fat pointers are not yet enabled in Clang. Migration to this intrinsic 
is
+optional, and there are no plans to deprecate `amdgcn.global.load.lds`.
+
 Changes to the ARM Backend
 --------------------------
 
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td 
b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index a57eb4a6dba49..3c9886a01d757 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2641,6 +2641,27 @@ def int_amdgcn_perm :
 // GFX9 Intrinsics
 
//===----------------------------------------------------------------------===//
 
+/// This is a general-purpose intrinsic for all operations that take a pointer,
+/// a base location in LDS, and a data size and use it to perform a gather to 
LDS.
+/// This allows abstracting over both global pointers (address space 1) and
+/// the buffer-resource-wrapper pointers (address space 7 and 9).
+/// TODO: add support for address space 5 and scratch_load_lds.
+class AMDGPULoadToLDS :
+  ClangBuiltin<"__builtin_amdgcn_load_to_lds">,
+  Intrinsic <
+    [],
+    [llvm_anyptr_ty,                    // Base pointer to load from. Varies 
per lane.
+     LLVMQualPointerType<3>,            // LDS base pointer to store to. Must 
be wave-uniform.
+     llvm_i32_ty,                       // Data byte size: 1/2/4 (/12/16 for 
gfx950)
+     llvm_i32_ty,                       // imm offset (applied to both input 
and LDS address)
+     llvm_i32_ty],                      // auxiliary data (imm, cachepolicy 
(bit 0 = sc0,
+                                        //                                   
bit 1 = sc1,
+                                        //                                   
bit 4 = scc))
+    [IntrWillReturn, NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
+     ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, 
IntrNoCallback, IntrNoFree],
+     "", [SDNPMemOperand]>;
+def int_amdgcn_load_to_lds : AMDGPULoadToLDS;
+
 class AMDGPUGlobalLoadLDS :
   ClangBuiltin<"__builtin_amdgcn_global_load_lds">,
   Intrinsic <
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp 
b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 2fa03e3964207..907b5b7e705d7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2335,6 +2335,11 @@ bool 
AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
   case Intrinsic::amdgcn_struct_buffer_load_lds:
   case Intrinsic::amdgcn_struct_ptr_buffer_load_lds:
     return selectBufferLoadLds(I);
+  // Until we can store both the address space of the global and the LDS
+  // arguments by having two MachineMemOperands on an intrinsic, we just trust
+  // that the argument is a global pointer (buffer pointers have been handled 
by
+  // a LLVM IR-level lowering).
+  case Intrinsic::amdgcn_load_to_lds:
   case Intrinsic::amdgcn_global_load_lds:
     return selectGlobalLoadLds(I);
   case Intrinsic::amdgcn_exp_compr:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp 
b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index 284f6ede2b85b..d31f072a78e47 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -2151,6 +2151,7 @@ static bool isRemovablePointerIntrinsic(Intrinsic::ID 
IID) {
   case Intrinsic::memset:
   case Intrinsic::memset_inline:
   case Intrinsic::experimental_memset_pattern:
+  case Intrinsic::amdgcn_load_to_lds:
     return true;
   }
 }
@@ -2239,6 +2240,25 @@ PtrParts 
SplitPtrStructs::visitIntrinsicInst(IntrinsicInst &I) {
     SplitUsers.insert(&I);
     return {NewRsrc, Off};
   }
+  case Intrinsic::amdgcn_load_to_lds: {
+    Value *Ptr = I.getArgOperand(0);
+    if (!isSplitFatPtr(Ptr->getType()))
+      return {nullptr, nullptr};
+    IRB.SetInsertPoint(&I);
+    auto [Rsrc, Off] = getPtrParts(Ptr);
+    Value *LDSPtr = I.getArgOperand(1);
+    Value *LoadSize = I.getArgOperand(2);
+    Value *ImmOff = I.getArgOperand(3);
+    Value *Aux = I.getArgOperand(4);
+    Value *SOffset = IRB.getInt32(0);
+    Instruction *NewLoad = IRB.CreateIntrinsic(
+        Intrinsic::amdgcn_raw_ptr_buffer_load_lds, {},
+        {Rsrc, LDSPtr, LoadSize, Off, SOffset, ImmOff, Aux});
+    copyMetadata(NewLoad, &I);
+    SplitUsers.insert(&I);
+    I.replaceAllUsesWith(NewLoad);
+    return {nullptr, nullptr};
+  }
   }
   return {nullptr, nullptr};
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp 
b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 1d0e81db5a5db..6085c8d584af2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3312,6 +3312,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
       constrainOpWithReadfirstlane(B, MI, 6); // soffset
       return;
     }
+    case Intrinsic::amdgcn_load_to_lds:
     case Intrinsic::amdgcn_global_load_lds: {
       applyDefaultMapping(OpdMapper);
       constrainOpWithReadfirstlane(B, MI, 2);
@@ -5273,6 +5274,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const 
MachineInstr &MI) const {
       OpdsMapping[1] = AMDGPU::getValueMapping(Bank, 32);
       break;
     }
+    case Intrinsic::amdgcn_load_to_lds:
     case Intrinsic::amdgcn_global_load_lds: {
       OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
       OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp 
b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index c0c1831957719..53a04e209974a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1450,6 +1450,7 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo 
&Info,
       Info.flags |= MachineMemOperand::MOStore;
     return true;
   }
+  case Intrinsic::amdgcn_load_to_lds:
   case Intrinsic::amdgcn_global_load_lds: {
     Info.opc = ISD::INTRINSIC_VOID;
     unsigned Width = cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue();
@@ -1531,6 +1532,7 @@ bool SITargetLowering::getAddrModeArguments(const 
IntrinsicInst *II,
   case Intrinsic::amdgcn_global_load_tr_b128:
     Ptr = II->getArgOperand(0);
     break;
+  case Intrinsic::amdgcn_load_to_lds:
   case Intrinsic::amdgcn_global_load_lds:
     Ptr = II->getArgOperand(1);
     break;
@@ -10219,6 +10221,10 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue 
Op,
 
     return SDValue(Load, 0);
   }
+  // Buffers are handled by LowerBufferFatPointers, and we're going to go
+  // for "trust me" that the remaining cases are global pointers until
+  // such time as we can put two mem operands on an intrinsic.
+  case Intrinsic::amdgcn_load_to_lds:
   case Intrinsic::amdgcn_global_load_lds: {
     if (!Subtarget->hasVMemToLDSLoad())
       return SDValue();
@@ -10249,7 +10255,6 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue 
Op,
       break;
     }
 
-    auto *M = cast<MemSDNode>(Op);
     SDValue M0Val = copyToM0(DAG, Chain, DL, Op.getOperand(3));
 
     SmallVector<SDValue, 6> Ops;
@@ -10289,6 +10294,7 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue 
Op,
     Ops.push_back(M0Val.getValue(0)); // Chain
     Ops.push_back(M0Val.getValue(1)); // Glue
 
+    auto *M = cast<MemSDNode>(Op);
     MachineMemOperand *LoadMMO = M->getMemOperand();
     MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
     LoadPtrI.Offset = Op->getConstantOperandVal(5);
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll 
b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll
new file mode 100644
index 0000000000000..72ef6963c9976
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll
@@ -0,0 +1,75 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 
UTC_ARGS: --version 5
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck 
-check-prefixes=GFX950,GFX950-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck 
-check-prefixes=GFX950,GFX950-GISEL %s
+
+; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx942 
-filetype=null < %s 2>&1 | FileCheck -check-prefix=ERR-SDAG %s
+; RUN: not --crash llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx942 
-filetype=null < %s 2>&1 | FileCheck -check-prefix=ERR-GISEL %s
+
+; ERR-SDAG: LLVM ERROR: Cannot select: intrinsic %llvm.amdgcn.load.to.lds
+
+; ERR-GISEL: LLVM ERROR: cannot select: G_INTRINSIC_W_SIDE_EFFECTS 
intrinsic(@llvm.amdgcn.load.to.lds),
+
+;; Note: this is a bare-bones test to make sure that amdgcn.load.to.lds lowers 
to
+;; the correct intrinsic.
+
+declare void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) nocapture %gptr, ptr 
addrspace(3) nocapture %lptr, i32 %size, i32 %offset, i32 %aux)
+declare void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7) nocapture %gptr, ptr 
addrspace(3) nocapture %lptr, i32 %size, i32 %offset, i32 %aux)
+
+;---------------------------------------------------------------------
+; dwordx3
+;---------------------------------------------------------------------
+
+define amdgpu_ps void @global_load_lds_dwordx3_vaddr_saddr(ptr addrspace(1) 
nocapture %gptr, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX950-LABEL: global_load_lds_dwordx3_vaddr_saddr:
+; GFX950:       ; %bb.0:
+; GFX950-NEXT:    s_mov_b32 m0, s0
+; GFX950-NEXT:    s_nop 0
+; GFX950-NEXT:    global_load_lds_dwordx3 v[0:1], off offset:16 sc0
+; GFX950-NEXT:    s_endpgm
+  call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) %gptr, ptr 
addrspace(3) %lptr, i32 12, i32 16, i32 1)
+  ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx3_vaddr_saddr(ptr addrspace(7) 
nocapture inreg %gptr, i32 %off, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX950-LABEL: buffer_load_lds_dwordx3_vaddr_saddr:
+; GFX950:       ; %bb.0:
+; GFX950-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX950-NEXT:    s_mov_b32 m0, s5
+; GFX950-NEXT:    s_nop 0
+; GFX950-NEXT:    buffer_load_dwordx3 v0, s[0:3], 0 offen offset:16 sc0 lds
+; GFX950-NEXT:    s_endpgm
+  %gptr.off = getelementptr i8, ptr addrspace(7) %gptr, i32 %off
+  call void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7) %gptr.off, ptr 
addrspace(3) %lptr, i32 12, i32 16, i32 1)
+  ret void
+}
+
+;---------------------------------------------------------------------
+; dwordx4
+;---------------------------------------------------------------------
+
+define amdgpu_ps void @global_load_lds_dwordx4_vaddr_saddr(ptr addrspace(1) 
nocapture %gptr, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX950-LABEL: global_load_lds_dwordx4_vaddr_saddr:
+; GFX950:       ; %bb.0:
+; GFX950-NEXT:    s_mov_b32 m0, s0
+; GFX950-NEXT:    s_nop 0
+; GFX950-NEXT:    global_load_lds_dwordx4 v[0:1], off offset:16 sc0
+; GFX950-NEXT:    s_endpgm
+  call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %gptr, ptr 
addrspace(3) %lptr, i32 16, i32 16, i32 1)
+  ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dwordx4_vaddr_saddr(ptr addrspace(7) 
nocapture inreg %gptr, i32 %off, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX950-LABEL: buffer_load_lds_dwordx4_vaddr_saddr:
+; GFX950:       ; %bb.0:
+; GFX950-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX950-NEXT:    s_mov_b32 m0, s5
+; GFX950-NEXT:    s_nop 0
+; GFX950-NEXT:    buffer_load_dwordx4 v0, s[0:3], 0 offen offset:16 sc0 lds
+; GFX950-NEXT:    s_endpgm
+  %gptr.off = getelementptr i8, ptr addrspace(7) %gptr, i32 %off
+  call void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7) %gptr.off, ptr 
addrspace(3) %lptr, i32 16, i32 16, i32 1)
+  ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add 
tests below this line:
+; GFX950-GISEL: {{.*}}
+; GFX950-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.ll 
b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.ll
new file mode 100644
index 0000000000000..8ab46fa6bf31c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.ll
@@ -0,0 +1,220 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck 
%s --check-prefix=GFX90A
+; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs < %s | FileCheck 
%s --check-prefix=GFX90A
+; RUN: llc -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck 
%s --check-prefix=GFX942
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | 
FileCheck %s --check-prefix=GFX10
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < 
%s | FileCheck %s --check-prefix=GFX942-GISEL
+
+;; Note: load.to.lds is a wrapper intrinsic around underlying operations.
+;; This is a bare-bones test to ensure that it lowers to the correct 
instructions.
+
+declare void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) nocapture %gptr, ptr 
addrspace(3) nocapture %lptr, i32 %size, i32 %offset, i32 %aux)
+declare void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7) nocapture %gptr, ptr 
addrspace(3) nocapture %lptr, i32 %size, i32 %offset, i32 %aux)
+
+define amdgpu_ps void @global_load_lds_dword_vaddr_saddr(ptr addrspace(1) 
nocapture %gptr, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX90A-LABEL: global_load_lds_dword_vaddr_saddr:
+; GFX90A:       ; %bb.0: ; %main_body
+; GFX90A-NEXT:    s_mov_b32 m0, s0
+; GFX90A-NEXT:    s_nop 0
+; GFX90A-NEXT:    global_load_dword v[0:1], off offset:16 glc lds
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX942-LABEL: global_load_lds_dword_vaddr_saddr:
+; GFX942:       ; %bb.0: ; %main_body
+; GFX942-NEXT:    s_mov_b32 m0, s0
+; GFX942-NEXT:    s_nop 0
+; GFX942-NEXT:    global_load_lds_dword v[0:1], off offset:16 sc0
+; GFX942-NEXT:    s_endpgm
+;
+; GFX10-LABEL: global_load_lds_dword_vaddr_saddr:
+; GFX10:       ; %bb.0: ; %main_body
+; GFX10-NEXT:    s_mov_b32 m0, s0
+; GFX10-NEXT:    global_load_dword v[0:1], off offset:16 glc lds
+; GFX10-NEXT:    s_endpgm
+;
+; GFX942-GISEL-LABEL: global_load_lds_dword_vaddr_saddr:
+; GFX942-GISEL:       ; %bb.0: ; %main_body
+; GFX942-GISEL-NEXT:    s_mov_b32 m0, s0
+; GFX942-GISEL-NEXT:    s_nop 0
+; GFX942-GISEL-NEXT:    global_load_lds_dword v[0:1], off offset:16 sc0
+; GFX942-GISEL-NEXT:    s_endpgm
+main_body:
+  call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) %gptr, ptr 
addrspace(3) %lptr, i32 4, i32 16, i32 1)
+  ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_dword_vaddr_saddr(ptr addrspace(7) 
nocapture inreg %gptr, i32 %off, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX90A-LABEL: buffer_load_lds_dword_vaddr_saddr:
+; GFX90A:       ; %bb.0: ; %main_body
+; GFX90A-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX90A-NEXT:    s_mov_b32 m0, s5
+; GFX90A-NEXT:    s_nop 0
+; GFX90A-NEXT:    buffer_load_dword v0, s[0:3], 0 offen offset:16 glc lds
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX942-LABEL: buffer_load_lds_dword_vaddr_saddr:
+; GFX942:       ; %bb.0: ; %main_body
+; GFX942-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX942-NEXT:    s_mov_b32 m0, s5
+; GFX942-NEXT:    s_nop 0
+; GFX942-NEXT:    buffer_load_dword v0, s[0:3], 0 offen offset:16 sc0 lds
+; GFX942-NEXT:    s_endpgm
+;
+; GFX10-LABEL: buffer_load_lds_dword_vaddr_saddr:
+; GFX10:       ; %bb.0: ; %main_body
+; GFX10-NEXT:    v_add_nc_u32_e32 v0, s4, v0
+; GFX10-NEXT:    s_mov_b32 m0, s5
+; GFX10-NEXT:    buffer_load_dword v0, s[0:3], 0 offen offset:16 glc lds
+; GFX10-NEXT:    s_endpgm
+;
+; GFX942-GISEL-LABEL: buffer_load_lds_dword_vaddr_saddr:
+; GFX942-GISEL:       ; %bb.0: ; %main_body
+; GFX942-GISEL-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX942-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX942-GISEL-NEXT:    s_nop 0
+; GFX942-GISEL-NEXT:    buffer_load_dword v0, s[0:3], 0 offen offset:16 sc0 lds
+; GFX942-GISEL-NEXT:    s_endpgm
+main_body:
+  %gptr.off = getelementptr i8, ptr addrspace(7) %gptr, i32 %off
+  call void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7) %gptr.off, ptr 
addrspace(3) %lptr, i32 4, i32 16, i32 1)
+  ret void
+}
+
+define amdgpu_ps void @global_load_lds_ushort_vaddr_saddr(ptr addrspace(1) 
nocapture %gptr, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX90A-LABEL: global_load_lds_ushort_vaddr_saddr:
+; GFX90A:       ; %bb.0: ; %main_body
+; GFX90A-NEXT:    s_mov_b32 m0, s0
+; GFX90A-NEXT:    s_nop 0
+; GFX90A-NEXT:    global_load_ushort v[0:1], off offset:16 glc lds
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX942-LABEL: global_load_lds_ushort_vaddr_saddr:
+; GFX942:       ; %bb.0: ; %main_body
+; GFX942-NEXT:    s_mov_b32 m0, s0
+; GFX942-NEXT:    s_nop 0
+; GFX942-NEXT:    global_load_lds_ushort v[0:1], off offset:16 sc0
+; GFX942-NEXT:    s_endpgm
+;
+; GFX10-LABEL: global_load_lds_ushort_vaddr_saddr:
+; GFX10:       ; %bb.0: ; %main_body
+; GFX10-NEXT:    s_mov_b32 m0, s0
+; GFX10-NEXT:    global_load_ushort v[0:1], off offset:16 glc lds
+; GFX10-NEXT:    s_endpgm
+;
+; GFX942-GISEL-LABEL: global_load_lds_ushort_vaddr_saddr:
+; GFX942-GISEL:       ; %bb.0: ; %main_body
+; GFX942-GISEL-NEXT:    s_mov_b32 m0, s0
+; GFX942-GISEL-NEXT:    s_nop 0
+; GFX942-GISEL-NEXT:    global_load_lds_ushort v[0:1], off offset:16 sc0
+; GFX942-GISEL-NEXT:    s_endpgm
+main_body:
+  call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) %gptr, ptr 
addrspace(3) %lptr, i32 2, i32 16, i32 1)
+  ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_ushort_vaddr_saddr(ptr addrspace(7) 
nocapture inreg %gptr, i32 %off, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX90A-LABEL: buffer_load_lds_ushort_vaddr_saddr:
+; GFX90A:       ; %bb.0: ; %main_body
+; GFX90A-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX90A-NEXT:    s_mov_b32 m0, s5
+; GFX90A-NEXT:    s_nop 0
+; GFX90A-NEXT:    buffer_load_ushort v0, s[0:3], 0 offen offset:16 glc lds
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX942-LABEL: buffer_load_lds_ushort_vaddr_saddr:
+; GFX942:       ; %bb.0: ; %main_body
+; GFX942-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX942-NEXT:    s_mov_b32 m0, s5
+; GFX942-NEXT:    s_nop 0
+; GFX942-NEXT:    buffer_load_ushort v0, s[0:3], 0 offen offset:16 sc0 lds
+; GFX942-NEXT:    s_endpgm
+;
+; GFX10-LABEL: buffer_load_lds_ushort_vaddr_saddr:
+; GFX10:       ; %bb.0: ; %main_body
+; GFX10-NEXT:    v_add_nc_u32_e32 v0, s4, v0
+; GFX10-NEXT:    s_mov_b32 m0, s5
+; GFX10-NEXT:    buffer_load_ushort v0, s[0:3], 0 offen offset:16 glc lds
+; GFX10-NEXT:    s_endpgm
+;
+; GFX942-GISEL-LABEL: buffer_load_lds_ushort_vaddr_saddr:
+; GFX942-GISEL:       ; %bb.0: ; %main_body
+; GFX942-GISEL-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX942-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX942-GISEL-NEXT:    s_nop 0
+; GFX942-GISEL-NEXT:    buffer_load_ushort v0, s[0:3], 0 offen offset:16 sc0 
lds
+; GFX942-GISEL-NEXT:    s_endpgm
+main_body:
+  %gptr.off = getelementptr i8, ptr addrspace(7) %gptr, i32 %off
+  call void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7) %gptr.off, ptr 
addrspace(3) %lptr, i32 2, i32 16, i32 1)
+  ret void
+}
+
+define amdgpu_ps void @global_load_lds_ubyte_vaddr_saddr(ptr addrspace(1) 
nocapture %gptr, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX90A-LABEL: global_load_lds_ubyte_vaddr_saddr:
+; GFX90A:       ; %bb.0: ; %main_body
+; GFX90A-NEXT:    s_mov_b32 m0, s0
+; GFX90A-NEXT:    s_nop 0
+; GFX90A-NEXT:    global_load_ubyte v[0:1], off offset:16 glc lds
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX942-LABEL: global_load_lds_ubyte_vaddr_saddr:
+; GFX942:       ; %bb.0: ; %main_body
+; GFX942-NEXT:    s_mov_b32 m0, s0
+; GFX942-NEXT:    s_nop 0
+; GFX942-NEXT:    global_load_lds_ubyte v[0:1], off offset:16 sc0
+; GFX942-NEXT:    s_endpgm
+;
+; GFX10-LABEL: global_load_lds_ubyte_vaddr_saddr:
+; GFX10:       ; %bb.0: ; %main_body
+; GFX10-NEXT:    s_mov_b32 m0, s0
+; GFX10-NEXT:    global_load_ubyte v[0:1], off offset:16 glc lds
+; GFX10-NEXT:    s_endpgm
+;
+; GFX942-GISEL-LABEL: global_load_lds_ubyte_vaddr_saddr:
+; GFX942-GISEL:       ; %bb.0: ; %main_body
+; GFX942-GISEL-NEXT:    s_mov_b32 m0, s0
+; GFX942-GISEL-NEXT:    s_nop 0
+; GFX942-GISEL-NEXT:    global_load_lds_ubyte v[0:1], off offset:16 sc0
+; GFX942-GISEL-NEXT:    s_endpgm
+main_body:
+  call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) %gptr, ptr 
addrspace(3) %lptr, i32 1, i32 16, i32 1)
+  ret void
+}
+
+define amdgpu_ps void @buffer_load_lds_ubyte_vaddr_saddr(ptr addrspace(7) 
nocapture inreg %gptr, i32 %off, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX90A-LABEL: buffer_load_lds_ubyte_vaddr_saddr:
+; GFX90A:       ; %bb.0: ; %main_body
+; GFX90A-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX90A-NEXT:    s_mov_b32 m0, s5
+; GFX90A-NEXT:    s_nop 0
+; GFX90A-NEXT:    buffer_load_ubyte v0, s[0:3], 0 offen offset:16 glc lds
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX942-LABEL: buffer_load_lds_ubyte_vaddr_saddr:
+; GFX942:       ; %bb.0: ; %main_body
+; GFX942-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX942-NEXT:    s_mov_b32 m0, s5
+; GFX942-NEXT:    s_nop 0
+; GFX942-NEXT:    buffer_load_ubyte v0, s[0:3], 0 offen offset:16 sc0 lds
+; GFX942-NEXT:    s_endpgm
+;
+; GFX10-LABEL: buffer_load_lds_ubyte_vaddr_saddr:
+; GFX10:       ; %bb.0: ; %main_body
+; GFX10-NEXT:    v_add_nc_u32_e32 v0, s4, v0
+; GFX10-NEXT:    s_mov_b32 m0, s5
+; GFX10-NEXT:    buffer_load_ubyte v0, s[0:3], 0 offen offset:16 glc lds
+; GFX10-NEXT:    s_endpgm
+;
+; GFX942-GISEL-LABEL: buffer_load_lds_ubyte_vaddr_saddr:
+; GFX942-GISEL:       ; %bb.0: ; %main_body
+; GFX942-GISEL-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX942-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX942-GISEL-NEXT:    s_nop 0
+; GFX942-GISEL-NEXT:    buffer_load_ubyte v0, s[0:3], 0 offen offset:16 sc0 lds
+; GFX942-GISEL-NEXT:    s_endpgm
+main_body:
+  %gptr.off = getelementptr i8, ptr addrspace(7) %gptr, i32 %off
+  call void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7) %gptr.off, ptr 
addrspace(3) %lptr, i32 1, i32 16, i32 1)
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-mem-transfer.ll 
b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-mem-transfer.ll
index ee51b0b84554e..1dbd0622591e9 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-mem-transfer.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-mem-transfer.ll
@@ -1724,3 +1724,21 @@ define void @memset_pattern_unknown(ptr addrspace(7) 
inreg %ptr, i32 inreg %leng
   call void @llvm.experimental.memset.pattern.p7.i32.i32(ptr addrspace(7) 
%ptr, i32 1, i32 %length, i1 false)
   ret void
 }
+
+;;; Buffer load to LDS
+
+declare void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7), ptr addrspace(3), 
i32 immarg, i32 immarg, i32 immarg)
+
+define void @llvm_amdgcn_load_to_lds(ptr addrspace(7) inreg %p, ptr 
addrspace(3) inreg %l, i32 %idx) {
+; CHECK-LABEL: define void @llvm_amdgcn_load_to_lds(
+; CHECK-SAME: { ptr addrspace(8), i32 } inreg [[P:%.*]], ptr addrspace(3) 
inreg [[L:%.*]], i32 [[IDX:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[P_RSRC:%.*]] = extractvalue { ptr addrspace(8), i32 } 
[[P]], 0
+; CHECK-NEXT:    [[P_OFF:%.*]] = extractvalue { ptr addrspace(8), i32 } [[P]], 
1
+; CHECK-NEXT:    [[Q:%.*]] = add i32 [[P_OFF]], [[IDX]]
+; CHECK-NEXT:    call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr 
addrspace(8) [[P_RSRC]], ptr addrspace(3) [[L]], i32 4, i32 [[Q]], i32 0, i32 
16, i32 0)
+; CHECK-NEXT:    ret void
+;
+  %q = getelementptr i8, ptr addrspace(7) %p, i32 %idx
+  call void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7) %q, ptr addrspace(3) 
%l, i32 4, i32 16, i32 0)
+  ret void
+}
diff --git a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td 
b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td
index f14aa5a2e1564..78facbfdc433a 100644
--- a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td
+++ b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td
@@ -806,15 +806,17 @@ def AMDGPU_GatherToLDSOp :
     Results<(outs)> {
   let summary = "MLIR wrapper for CDNA mfma instructions";
   let description = [{
-    The `amdgpu.global_load` op is a wrapper around the `global_load_lds` 
instructions.
+    The `amdgpu.gather_to_lds` op is a wrapper around the `global_load_lds` 
instructions.
 
     Operands:
-    * `$src`: global memory memref to read from.
+    * `$src`: global memory (including fat buffer) memref to read from.
     * `$srcIndices`: indices into `$src` to read from for this thread.
     * `$dst`: LDS memory memref to write to.
     * `$dstIndices`: base indices into `$dst` to write to for the subgroup of 
this thread.
-      The elements gathered by the subgroup will be written in order of lane 
ID will be written
-      into contiguously starting at `$dst[$dstIndices]`.
+      The elements gathered by the subgroup will be written contiguously in 
order of lane ID
+      starting at `$dst[$dstIndices]`. Byte-sized (ex. i8) or short-sized (ex. 
i16)
+      types will be zero-padded/extended to 32 bits before being written. 
96-bit types
+      (ex. vector<3xf32>) will be zero-padded to 128 bits before being written.
     * `$transferType`: type of the data to be transferred by each thread. This 
is used to determine
       the size of the data to be transferred and the number of threads in the 
subgroup.
       The transfer type must be a scalar type or a vector type with a single 
element type.
@@ -822,7 +824,7 @@ def AMDGPU_GatherToLDSOp :
     The `$dst`, along with its indices, points to the memory location the 
subgroup of this thread
     will write to.
 
-    Note: only enabled for gfx942 and later.
+    Note: only supported on gfx9 and gfx10.
   }];
   let assemblyFormat = [{
     $src `[` $srcIndices `]` `,` $dst `[` $dstIndices `]` attr-dict `:` 
$transferType `,` type($src) `,` type($dst)
diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td 
b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
index 93e59e0e7e6be..6fb9e3aba1f0a 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
@@ -446,17 +446,40 @@ def ROCDL_ds_read_tr6_b96 : 
ROCDL_LDS_Read_Tr_IntrOp<"ds.read.tr6.b96">;
 def ROCDL_ds_read_tr16_b64 : ROCDL_LDS_Read_Tr_IntrOp<"ds.read.tr16.b64">;
 
 //===---------------------------------------------------------------------===//
-// Global load to LDS intrinsic (available in GFX950)
+// Load to LDS intrinsic (available in GFX9 and GFX10)
+//===---------------------------------------------------------------------===//
+
+def ROCDL_LoadToLDSOp :
+  ROCDL_IntrOp<"load.to.lds", [], [0], [], 0, 0, 1, [2, 3, 4], ["size", 
"offset", "aux"]> {
+  dag args = (ins Arg<LLVM_AnyPointer, "", [MemRead]>:$globalPtr,
+                 Arg<ROCDLBufferLDS, "", [MemWrite]>:$ldsPtr,
+                 I32Attr:$size,
+                 I32Attr:$offset,
+                 I32Attr:$aux);
+  let arguments = !con(args, aliasAttrs);
+  let assemblyFormat = [{
+    $globalPtr `,`  $ldsPtr `,` $size `,` $offset `,` $aux
+    attr-dict `:` type($globalPtr)
+  }];
+  let extraClassDefinition = [{
+    ::llvm::SmallVector<::mlir::Value> $cppClass::getAccessedOperands() {
+      return {getGlobalPtr(), getLdsPtr()};
+    }
+  }];
+}
 
 def ROCDL_GlobalLoadLDSOp :
-  ROCDL_IntrOp<"global.load.lds", [], [], [], 0, 0, 1> {
+  ROCDL_IntrOp<"global.load.lds", [], [], [], 0, 0, 1, [2, 3, 4], ["size", 
"offset", "aux"]> {
   dag args = (ins Arg<ROCDLGlobalBuffer, "", [MemRead]>:$globalPtr,
                  Arg<ROCDLBufferLDS, "", [MemWrite]>:$ldsPtr,
-                 I32:$size,
-                 I32:$offset,
-                 I32:$aux);
+                 I32Attr:$size,
+                 I32Attr:$offset,
+                 I32Attr:$aux);
   let arguments = !con(args, aliasAttrs);
-  let assemblyFormat = "operands attr-dict";
+  let assemblyFormat = [{
+    $globalPtr `,`  $ldsPtr `,` $size `,` $offset `,` $aux
+    attr-dict
+  }];
   let extraClassDefinition = [{
     ::llvm::SmallVector<::mlir::Value> $cppClass::getAccessedOperands() {
       return {getGlobalPtr(), getLdsPtr()};
diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp 
b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
index 91dbc2de65c4e..0ce5255090471 100644
--- a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
+++ b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
@@ -1019,8 +1019,8 @@ struct GatherToLDSOpLowering : public 
ConvertOpToLLVMPattern<GatherToLDSOp> {
   LogicalResult
   matchAndRewrite(GatherToLDSOp op, GatherToLDSOpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    if (chipset < kGfx942)
-      return op.emitOpError("chipset not supported");
+    if (chipset.majorVersion < 9 || chipset.majorVersion > 10)
+      return op.emitOpError("pre-gfx9 and post-gfx10 not supported");
 
     Location loc = op.getLoc();
 
@@ -1035,9 +1035,8 @@ struct GatherToLDSOpLowering : public 
ConvertOpToLLVMPattern<GatherToLDSOp> {
       if (auto transferVectorType = dyn_cast<VectorType>(transferType)) {
         return transferVectorType.getNumElements() *
                (transferVectorType.getElementTypeBitWidth() / 8);
-      } else {
-        return transferType.getIntOrFloatBitWidth() / 8;
       }
+      return transferType.getIntOrFloatBitWidth() / 8;
     }();
 
     // Currently only 1, 2, and 4 byte loads are supported.
@@ -1049,10 +1048,10 @@ struct GatherToLDSOpLowering : public 
ConvertOpToLLVMPattern<GatherToLDSOp> {
     Value dstPtr = getStridedElementPtr(loc, dstMemRefType, adaptor.getDst(),
                                         (adaptor.getDstIndices()), rewriter);
 
-    rewriter.replaceOpWithNewOp<ROCDL::GlobalLoadLDSOp>(
-        op, srcPtr, dstPtr, createI32Constant(rewriter, loc, loadWidth),
-        createI32Constant(rewriter, loc, 0),
-        createI32Constant(rewriter, loc, 0), ArrayAttr{}, ArrayAttr{},
+    rewriter.replaceOpWithNewOp<ROCDL::LoadToLDSOp>(
+        op, srcPtr, dstPtr, rewriter.getI32IntegerAttr(loadWidth),
+        /*offset=*/rewriter.getI32IntegerAttr(0),
+        /*aux=*/rewriter.getI32IntegerAttr(0), ArrayAttr{}, ArrayAttr{},
         ArrayAttr{});
 
     return success();
diff --git a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp 
b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
index 549a4376a4a04..a0a98a4e86721 100644
--- a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
+++ b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
@@ -61,7 +61,7 @@ LogicalResult PackedStochRoundFp8Op::verify() {
 }
 
 
//===----------------------------------------------------------------------===//
-// FatRawBuferCastOp
+// FatRawBufferCastOp
 
//===----------------------------------------------------------------------===//
 
 /// Convert the type `source` to one with the same sizes and strides - and
@@ -131,6 +131,14 @@ static bool hasWorkgroupMemorySpace(Attribute memorySpace) 
{
   return false;
 }
 
+static bool hasFatRawBufferMemorySpace(Attribute memorySpace) {
+  if (auto intMemorySpace = dyn_cast<IntegerAttr>(memorySpace))
+    return intMemorySpace.getInt() == 7;
+  if (auto gpuMemorySpace = dyn_cast<amdgpu::AddressSpaceAttr>(memorySpace))
+    return gpuMemorySpace.getValue() == amdgpu::AddressSpace::FatRawBuffer;
+  return false;
+}
+
 
//===----------------------------------------------------------------------===//
 // RawBuffer*Op
 
//===----------------------------------------------------------------------===//
@@ -476,9 +484,8 @@ LogicalResult GatherToLDSOp::verify() {
   MemRefType srcType = cast<MemRefType>(getSrc().getType());
   MemRefType dstType = cast<MemRefType>(getDst().getType());
 
-  if (!memref::isStaticShapeAndContiguousRowMajor(dstType))
-    return emitOpError(
-        "destination types must have static shape  and contiguous");
+  if (!dstType.areTrailingDimsContiguous(dstType.getRank()))
+    return emitOpError("destination types must be contiguous");
 
   auto elemType = srcType.getElementType();
   // Check $src and $dst element types are the same.
@@ -497,8 +504,10 @@ LogicalResult GatherToLDSOp::verify() {
   if (transferSize != 8 && transferSize != 16 && transferSize != 32)
     return emitOpError("Transfering type size must be 8, 16, or 32 bits");
 
-  if (!hasGlobalMemorySpace(srcType.getMemorySpace()))
-    return emitOpError("source memory address space must be Global");
+  if (!hasGlobalMemorySpace(srcType.getMemorySpace()) &&
+      !hasFatRawBufferMemorySpace(srcType.getMemorySpace()))
+    return emitOpError(
+        "source memory address space must be global or fat raw buffer");
 
   if (!hasWorkgroupMemorySpace(dstType.getMemorySpace()))
     return emitOpError("destination memory address space must be Workgroup");
diff --git a/mlir/test/Conversion/AMDGPUToROCDL/load_lds.mlir 
b/mlir/test/Conversion/AMDGPUToROCDL/load_lds.mlir
index b1c16bd5db079..cb3539dd11be3 100644
--- a/mlir/test/Conversion/AMDGPUToROCDL/load_lds.mlir
+++ b/mlir/test/Conversion/AMDGPUToROCDL/load_lds.mlir
@@ -2,6 +2,7 @@
 
 #gpu_global_addrspace = 1
 #gpu_lds_addrspace = 3
+#amdgpu_fat_buffer_addrspace = 7
 
 // CHECK-LABEL: func @global_load_to_rocdl_f32
 // CHECK-SAME: (%[[ARG0:.*]]: memref<128x72xf32, 1>)
@@ -21,8 +22,8 @@ func.func @global_load_to_rocdl_f32(%global : 
memref<128x72xf32, #gpu_global_add
 
   // CHECK: %[[ALLOC:.*]] = memref.alloc()
   // CHECK: %[[LDS_DESC:.*]] = builtin.unrealized_conversion_cast
-  // CHECK: %[[GLOBAL_BASE:.*]] = llvm.extractvalue %[[GLOBAL_DESC]][1] 
-  
+  // CHECK: %[[GLOBAL_BASE:.*]] = llvm.extractvalue %[[GLOBAL_DESC]][1]
+
   // CHECK: %[[C72:.*]] = llvm.mlir.constant(72 : index) : i64
   // CHECK: %[[MUL:.*]] = llvm.mul %[[IC12]], %[[C72]] : i64
   // CHECK: %[[SRC_OFFSET:.*]] = llvm.add %[[MUL]], %[[IC0]] : i64
@@ -35,8 +36,7 @@ func.func @global_load_to_rocdl_f32(%global : 
memref<128x72xf32, #gpu_global_add
   // CHECK: %[[DST_OFFSET:.*]] = llvm.add %[[MUL_2]], %[[IC0]] : i64
 
   // CHECK: %[[LDS_PTR:.*]] = llvm.getelementptr %[[LDS_BASE]][%[[DST_OFFSET]]]
-  // CHECK: %[[C4:.*]] = llvm.mlir.constant(4 : i32) : i32
-  // CHECK: rocdl.global.load.lds %[[GLOBAL_PTR]], %[[LDS_PTR]], %[[C4]]
+  // CHECK: rocdl.load.to.lds %[[GLOBAL_PTR]], %[[LDS_PTR]], 4
   amdgpu.gather_to_lds %global[%c12, %c0], %alloc[%c32, %c0]
     : f32, memref<128x72xf32, #gpu_global_addrspace>, memref<64x64xf32, 
#gpu_lds_addrspace>
   func.return
@@ -56,8 +56,8 @@ func.func @global_load_to_rocdl_i8(%global : 
memref<128x72xi8, #gpu_global_addrs
 
   // CHECK: %[[ALLOC:.*]] = memref.alloc()
   // CHECK: %[[LDS_DESC:.*]] = builtin.unrealized_conversion_cast %[[ALLOC]]
-  // CHECK: %[[GLOBAL_BASE:.*]] = llvm.extractvalue %[[GLOBAL_DESC]][1] 
-  
+  // CHECK: %[[GLOBAL_BASE:.*]] = llvm.extractvalue %[[GLOBAL_DESC]][1]
+
   // CHECK: %[[C72:.*]] = llvm.mlir.constant(72 : index) : i64
   // CHECK: %[[MUL:.*]] = llvm.mul %[[IC12]], %[[C72]] : i64
   // CHECK: %[[SRC_OFFSET:.*]] = llvm.add %[[MUL]], %[[IC0]] : i64
@@ -70,8 +70,7 @@ func.func @global_load_to_rocdl_i8(%global : 
memref<128x72xi8, #gpu_global_addrs
   // CHECK: %[[DST_OFFSET:.*]] = llvm.add %[[MUL_2]], %[[IC0]] : i64
 
   // CHECK: %[[LDS_PTR:.*]] = llvm.getelementptr %[[LDS_BASE]][%[[DST_OFFSET]]]
-  // CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : i32
-  // CHECK: rocdl.global.load.lds %[[GLOBAL_PTR]], %[[LDS_PTR]], %[[C1]]
+  // CHECK: rocdl.load.to.lds %[[GLOBAL_PTR]], %[[LDS_PTR]], 1
   %c0 = arith.constant 0 : index
   %c12 = arith.constant 12 : index
   %c32 = arith.constant 32 : index
@@ -85,7 +84,7 @@ func.func @global_load_to_rocdl_i8(%global : 
memref<128x72xi8, #gpu_global_addrs
 // CHECK-SAME: (%[[ARG0:.*]]: memref<128x72xi16, 1>)
 func.func @global_load_to_rocdl_vec(%global : memref<128x72xi16, 
#gpu_global_addrspace>) {
   // CHECK: %[[GLOBAL_DESC:.*]] = builtin.unrealized_conversion_cast %[[ARG0]]
-  
+
   // CHECK: %[[C0:.*]] = arith.constant 0 : index
   // CHECK: %[[IC0:.*]] = builtin.unrealized_conversion_cast %c0 : index to i64
   // CHECK: %[[C12:.*]] = arith.constant 12 : index
@@ -95,8 +94,8 @@ func.func @global_load_to_rocdl_vec(%global : 
memref<128x72xi16, #gpu_global_add
 
   // CHECK: %[[ALLOC:.*]] = memref.alloc()
   // CHECK: %[[LDS_DESC:.*]] = builtin.unrealized_conversion_cast %[[ALLOC]]
-  // CHECK: %[[GLOBAL_BASE:.*]] = llvm.extractvalue %[[GLOBAL_DESC]][1] 
-  
+  // CHECK: %[[GLOBAL_BASE:.*]] = llvm.extractvalue %[[GLOBAL_DESC]][1]
+
   // CHECK: %[[C72:.*]] = llvm.mlir.constant(72 : index) : i64
   // CHECK: %[[MUL:.*]] = llvm.mul %[[IC12]], %[[C72]] : i64
   // CHECK: %[[SRC_OFFSET:.*]] = llvm.add %[[MUL]], %[[IC0]] : i64
@@ -109,8 +108,7 @@ func.func @global_load_to_rocdl_vec(%global : 
memref<128x72xi16, #gpu_global_add
   // CHECK: %[[DST_OFFSET:.*]] = llvm.add %[[MUL_2]], %[[IC0]] : i64
 
   // CHECK: %[[LDS_PTR:.*]] = llvm.getelementptr %[[LDS_BASE]][%[[DST_OFFSET]]]
-  // CHECK: %[[C4:.*]] = llvm.mlir.constant(4 : i32) : i32
-  // CHECK: rocdl.global.load.lds %[[GLOBAL_PTR]], %[[LDS_PTR]], %[[C4]]
+  // CHECK: rocdl.load.to.lds %[[GLOBAL_PTR]], %[[LDS_PTR]], 4
   %c0 = arith.constant 0 : index
   %c12 = arith.constant 12 : index
   %c32 = arith.constant 32 : index
@@ -129,15 +127,52 @@ func.func @global_load_to_rocdl_dynamic_indices(%global : 
memref<512xi32, #gpu_g
   // CHECK: %[[GLOBAL_DESC:.*]] = builtin.unrealized_conversion_cast %[[ARG0]]
   // CHECK: %[[ALLOC:.*]] = memref.alloc()
   // CHECK: %[[LDS_DESC:.*]] = builtin.unrealized_conversion_cast %[[ALLOC]]
-  // CHECK: %[[GLOBAL_BASE:.*]] = llvm.extractvalue %[[GLOBAL_DESC]][1] 
+  // CHECK: %[[GLOBAL_BASE:.*]] = llvm.extractvalue %[[GLOBAL_DESC]][1]
   // CHECK: %[[GLOBAL_PTR:.*]] = llvm.getelementptr 
%[[GLOBAL_BASE]][%[[SRCIDX_CAST]]]
   // CHECK: %[[LDS_BASE:.*]] = llvm.extractvalue %[[LDS_DESC]][1]
   // CHECK: %[[LDS_PTR:.*]] = llvm.getelementptr 
%[[LDS_BASE]][%[[DSTIDX_CAST]]]
-  // CHECK: %[[C4:.*]] = llvm.mlir.constant(4 : i32) : i32
-  // CHECK: rocdl.global.load.lds %[[GLOBAL_PTR]], %[[LDS_PTR]], %[[C4]]
+  // CHECK: rocdl.load.to.lds %[[GLOBAL_PTR]], %[[LDS_PTR]], 4
   %alloc = memref.alloc() : memref<4x64xi32, #gpu_lds_addrspace>
   %c0 = arith.constant 0 : index
   amdgpu.gather_to_lds %global[%src_idx], %alloc[%dst_idx, %c0]
     : i32, memref<512xi32, #gpu_global_addrspace>, memref<4x64xi32, 
#gpu_lds_addrspace>
   func.return
 }
+
+// CHECK-LABEL: func @fat_buffer_load_to_rocdl_f32
+// CHECK-SAME: (%[[ARG0:.*]]: memref<128x72xf32, 7>)
+func.func @fat_buffer_load_to_rocdl_f32(%global : memref<128x72xf32, 
#amdgpu_fat_buffer_addrspace>) {
+  %c0 = arith.constant 0 : index
+  %c12 = arith.constant 12 : index
+  %c32 = arith.constant 32 : index
+  %alloc = memref.alloc() : memref<64x64xf32, #gpu_lds_addrspace>
+  // CHECK: %[[BUFFER_DESC:.*]] = builtin.unrealized_conversion_cast %[[ARG0]]
+
+  // CHECK: %[[C0:.*]] = arith.constant 0 : index
+  // CHECK: %[[IC0:.*]] = builtin.unrealized_conversion_cast %c0 : index to i64
+  // CHECK: %[[C12:.*]] = arith.constant 12 : index
+  // CHECK: %[[IC12:.*]] = builtin.unrealized_conversion_cast %[[C12]]
+  // CHECK: %[[C32:.*]] = arith.constant 32 : index
+  // CHECK: %[[IC32:.*]] = builtin.unrealized_conversion_cast %[[C32]]
+
+  // CHECK: %[[ALLOC:.*]] = memref.alloc()
+  // CHECK: %[[LDS_DESC:.*]] = builtin.unrealized_conversion_cast
+  // CHECK: %[[GLOBAL_BASE:.*]] = llvm.extractvalue %[[BUFFER_DESC]][1]
+
+  // CHECK: %[[C72:.*]] = llvm.mlir.constant(72 : index) : i64
+  // CHECK: %[[MUL:.*]] = llvm.mul %[[IC12]], %[[C72]] : i64
+  // CHECK: %[[SRC_OFFSET:.*]] = llvm.add %[[MUL]], %[[IC0]] : i64
+
+  // CHECK: %[[GLOBAL_PTR:.*]] = llvm.getelementptr 
%[[GLOBAL_BASE]][%[[SRC_OFFSET]]]
+  // CHECK: %[[LDS_BASE:.*]] = llvm.extractvalue %[[LDS_DESC]][1]
+
+  // CHECK: %[[C72_1:.*]] = llvm.mlir.constant(72 : index) : i64
+  // CHECK: %[[MUL_2:.*]] = llvm.mul %[[IC32]], %[[C72_1]] : i64
+  // CHECK: %[[DST_OFFSET:.*]] = llvm.add %[[MUL_2]], %[[IC0]] : i64
+
+  // CHECK: %[[LDS_PTR:.*]] = llvm.getelementptr %[[LDS_BASE]][%[[DST_OFFSET]]]
+  // CHECK: rocdl.load.to.lds %[[GLOBAL_PTR]], %[[LDS_PTR]], 4
+  amdgpu.gather_to_lds %global[%c12, %c0], %alloc[%c32, %c0]
+    : f32, memref<128x72xf32, #amdgpu_fat_buffer_addrspace>, memref<64x64xf32, 
#gpu_lds_addrspace>
+  func.return
+}
diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir 
b/mlir/test/Dialect/LLVMIR/rocdl.mlir
index cda1a9ca5f1f6..fbde993891342 100644
--- a/mlir/test/Dialect/LLVMIR/rocdl.mlir
+++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir
@@ -636,14 +636,17 @@ llvm.func @rocdl.ds.read.tr(%ptr : !llvm.ptr<3>) -> 
vector<4xf16> {
   llvm.return %r3 : vector<4xf16>
 }
 
-llvm.func @rocdl.global.load.lds(%src : !llvm.ptr<1>, %dst: !llvm.ptr<3>) {
-  %aux = llvm.mlir.constant(0 : i32) : i32
-  %offset = llvm.mlir.constant(0 : i32) : i32
-  %size = llvm.mlir.constant(10 : i32) : i32
-
-  //CHECK: rocdl.global.load.lds %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}
-  rocdl.global.load.lds %src, %dst, %size, %offset, %aux
+llvm.func @rocdl.load.to.lds(%src : !llvm.ptr<7>, %dst: !llvm.ptr<3>) {
+  // CHECK-LABEL: @rocdl.load.to.lds
+  //CHECK: rocdl.load.to.lds %{{.*}}, %{{.*}}, 4, 0, 0 : <7>
+  rocdl.load.to.lds %src, %dst, 4, 0, 0 : <7>
+  llvm.return
+}
 
+llvm.func @rocdl.global.load.lds(%src : !llvm.ptr<1>, %dst: !llvm.ptr<3>) {
+  // CHECK-LABEL: @rocdl.global.load.lds
+  //CHECK: rocdl.global.load.lds %{{.*}}, %{{.*}}, 4, 0, 0
+  rocdl.global.load.lds %src, %dst, 4, 0, 0
   llvm.return
 }
 
diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir 
b/mlir/test/Target/LLVMIR/rocdl.mlir
index af47582dd0bfb..b37f0da361950 100644
--- a/mlir/test/Target/LLVMIR/rocdl.mlir
+++ b/mlir/test/Target/LLVMIR/rocdl.mlir
@@ -848,12 +848,15 @@ llvm.func @rocdl.ds.read.tr(%ptr : !llvm.ptr<3>) -> 
vector<4xf16> {
   llvm.return %r3 : vector<4xf16>
 }
 
+llvm.func @rocdl.load.to.lds(%src : !llvm.ptr<7>, %dst: !llvm.ptr<3>) {
+  //CHECK: call void @llvm.amdgcn.load.to.lds.p7
+  rocdl.load.to.lds %src, %dst, 4, 0, 0 : !llvm.ptr<7>
+  llvm.return
+}
+
 llvm.func @rocdl.global.load.lds(%src : !llvm.ptr<1>, %dst: !llvm.ptr<3>) {
-  %aux = llvm.mlir.constant(0 : i32) : i32
-  %offset = llvm.mlir.constant(0 : i32) : i32
-  %size = llvm.mlir.constant(10 : i32) : i32
   //CHECK: call void @llvm.amdgcn.global.load.lds
-  rocdl.global.load.lds %src, %dst, %size, %offset, %aux
+  rocdl.global.load.lds %src, %dst, 4, 0, 0
   llvm.return
 }
 

>From da53e5e64a3f7a7283db3b20c3cdc36454df3af8 Mon Sep 17 00:00:00 2001
From: Krzysztof Drewniak <krzysztof.drewn...@amd.com>
Date: Mon, 28 Apr 2025 18:08:44 +0000
Subject: [PATCH 2/4] Fix clang build, document the operation, add
 error-handling tests

---
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp   |  1 +
 .../CodeGenOpenCL/builtins-amdgcn-gfx950.cl   |  2 +-
 .../builtins-amdgcn-load-to-lds-err.cl        | 25 ++++++++
 llvm/docs/AMDGPUUsage.rst                     | 10 ++-
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      |  1 -
 .../AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll  | 63 +++++++++++++++++--
 6 files changed, 94 insertions(+), 8 deletions(-)
 create mode 100644 clang/test/SemaOpenCL/builtins-amdgcn-load-to-lds-err.cl

diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp 
b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index a32ef1c2a5a12..d1c722c9dc610 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -565,6 +565,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned 
BuiltinID,
     return Builder.CreateCall(F, {Addr});
   }
   case AMDGPU::BI__builtin_amdgcn_load_to_lds: {
+    // Should this have asan instrumentation?
     return emitBuiltinWithOneOverloadedType<5>(*this, E,
                                                Intrinsic::amdgcn_load_to_lds);
   }
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl 
b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
index 4b73347ac8155..3d81893553c65 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
@@ -1778,7 +1778,7 @@ void test_cvt_sr_f16_f32(global half2 *out, float src, 
uint seed)
 // CHECK-NEXT:    ret void
 //
 void test_load_to_lds_96(global void* src, local void *dst) {
-  __builtin_amdgcn_global_load_lds(src, dst, /*size=*/12, /*offset=*/0, 
/*aux=*/0);
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/12, /*offset=*/0, /*aux=*/0);
 }
 
 // CHECK-LABEL: @test_load_to_lds_128(
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-load-to-lds-err.cl 
b/clang/test/SemaOpenCL/builtins-amdgcn-load-to-lds-err.cl
new file mode 100644
index 0000000000000..d93d724212077
--- /dev/null
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-load-to-lds-err.cl
@@ -0,0 +1,25 @@
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown 
-target-cpu gfx900 -S -verify=gfx,expected -o - %s
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown 
-target-cpu gfx942 -S -verify=gfx,expected -o - %s
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown 
-target-cpu gfx1010 -S -verify=gfx,expected -o - %s
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown 
-target-cpu gfx950 -S -verify=gfx950,expected -o - %s
+// REQUIRES: amdgpu-registered-target
+
+typedef unsigned int u32;
+
+void test_load_to_lds_unsupported_size(global u32* src, local u32 *dst, u32 
size, u32 offset, u32 aux) {
+  __builtin_amdgcn_load_to_lds(src, dst, size, /*offset=*/0, /*aux=*/0); // 
expected-error{{argument to '__builtin_amdgcn_load_to_lds' must be a constant 
integer}}
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/4, offset, /*aux=*/0); // 
expected-error{{argument to '__builtin_amdgcn_load_to_lds' must be a constant 
integer}}
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/4, /*offset=*/0, aux); // 
expected-error{{argument to '__builtin_amdgcn_load_to_lds' must be a constant 
integer}}
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/5, /*offset=*/0, /*aux=*/0); 
// expected-error{{invalid size value}} gfx-note {{size must be 1, 2, or 4}} 
gfx950-note {{size must be 1, 2, 4, 12 or 16}}
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/0, /*offset=*/0, /*aux=*/0); 
// expected-error{{invalid size value}} gfx-note {{size must be 1, 2, or 4}} 
gfx950-note {{size must be 1, 2, 4, 12 or 16}}
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/3, /*offset=*/0, /*aux=*/0); 
// expected-error{{invalid size value}} gfx-note {{size must be 1, 2, or 4}} 
gfx950-note {{size must be 1, 2, 4, 12 or 16}}
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/12, /*offset=*/0, 
/*aux=*/0); // gfx-error{{invalid size value}} gfx-note {{size must be 1, 2, or 
4}}
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/16, /*offset=*/0, 
/*aux=*/0); // gfx-error{{invalid size value}} gfx-note {{size must be 1, 2, or 
4}}
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/-1, /*offset=*/0, 
/*aux=*/0); // expected-error{{invalid size value}} gfx-note {{size must be 1, 
2, or 4}} gfx950-note {{size must be 1, 2, 4, 12 or 16}}
+}
+
+__attribute__((target("gfx950-insts")))
+void test_load_to_lds_via_target_feature(global u32* src, local u32 *dst, u32 
size, u32 offset, u32 aux) {
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/12, /*offset=*/0, /*aux=*/0);
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/16, /*offset=*/0, /*aux=*/0);
+}
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index d1535960a0257..3ee0f8cae3fc2 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -1216,7 +1216,15 @@ The AMDGPU backend implements the following LLVM IR 
intrinsics.
                                                    The format is a 64-bit 
concatenation of the MODE and TRAPSTS registers.
 
   :ref:`llvm.set.fpenv<int_set_fpenv>`             Sets the floating point 
environment to the specifies state.
-
+  llvm.amdgcn.load.to.lds.p<1/7>                   Loads values from global 
memory (either in the form of a global
+                                                   pointer or a raw fat buffer pointer) 
to LDS. The size of the data copied can be 1, 2,
+                                                   or 4 bytes (and gfx950 also 
allows 12 or 16 bytes). The LDS pointer
+                                                   argument should be 
wavefront-uniform; the global pointer need not be.
+                                                   The LDS pointer is 
implicitly offset by 4 * lane_id bytes for sizes <= 4 bytes
+                                                   and 16 * lane_id bytes for 
larger sizes. This lowers to `global_load_lds`,
+                                                   `buffer_load_* ... lds`, or 
`global_load_* ... lds` depending on address
+                                                   space and architecture. 
`amdgcn.global.load.lds` has the same semantics as
+                                                   `amdgcn.load.to.lds.p1`.
   llvm.amdgcn.readfirstlane                        Provides direct access to 
v_readfirstlane_b32. Returns the value in
                                                    the lowest active lane of 
the input operand. Currently implemented
                                                    for i16, i32, float, half, 
bfloat, <2 x i16>, <2 x half>, <2 x bfloat>,
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td 
b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 3c9886a01d757..7939ef0cf8620 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2647,7 +2647,6 @@ def int_amdgcn_perm :
 /// the buffer-resource-wrapper pointers (address space 7 and 9).
 /// TODO: add support for address space 5 and scratch_load_lds.
 class AMDGPULoadToLDS :
-  ClangBuiltin<"__builtin_amdgcn_load_to_lds">,
   Intrinsic <
     [],
     [llvm_anyptr_ty,                    // Base pointer to load from. Varies 
per lane.
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll 
b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll
index 72ef6963c9976..3a97ddf0ffd21 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 
UTC_ARGS: --version 5
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck 
-check-prefixes=GFX950,GFX950-SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck 
-check-prefixes=GFX950,GFX950-GISEL %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck 
-check-prefix=GFX950-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck 
-check-prefix=GFX950-GISEL %s
 
 ; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx942 
-filetype=null < %s 2>&1 | FileCheck -check-prefix=ERR-SDAG %s
 ; RUN: not --crash llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx942 
-filetype=null < %s 2>&1 | FileCheck -check-prefix=ERR-GISEL %s
@@ -26,6 +26,19 @@ define amdgpu_ps void 
@global_load_lds_dwordx3_vaddr_saddr(ptr addrspace(1) noca
 ; GFX950-NEXT:    s_nop 0
 ; GFX950-NEXT:    global_load_lds_dwordx3 v[0:1], off offset:16 sc0
 ; GFX950-NEXT:    s_endpgm
+; GFX950-SDAG-LABEL: global_load_lds_dwordx3_vaddr_saddr:
+; GFX950-SDAG:       ; %bb.0:
+; GFX950-SDAG-NEXT:    s_mov_b32 m0, s0
+; GFX950-SDAG-NEXT:    s_nop 0
+; GFX950-SDAG-NEXT:    global_load_lds_dwordx3 v[0:1], off offset:16 sc0
+; GFX950-SDAG-NEXT:    s_endpgm
+;
+; GFX950-GISEL-LABEL: global_load_lds_dwordx3_vaddr_saddr:
+; GFX950-GISEL:       ; %bb.0:
+; GFX950-GISEL-NEXT:    s_mov_b32 m0, s0
+; GFX950-GISEL-NEXT:    s_nop 0
+; GFX950-GISEL-NEXT:    global_load_lds_dwordx3 v[0:1], off offset:16 sc0
+; GFX950-GISEL-NEXT:    s_endpgm
   call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) %gptr, ptr 
addrspace(3) %lptr, i32 12, i32 16, i32 1)
   ret void
 }
@@ -38,6 +51,21 @@ define amdgpu_ps void 
@buffer_load_lds_dwordx3_vaddr_saddr(ptr addrspace(7) noca
 ; GFX950-NEXT:    s_nop 0
 ; GFX950-NEXT:    buffer_load_dwordx3 v0, s[0:3], 0 offen offset:16 sc0 lds
 ; GFX950-NEXT:    s_endpgm
+; GFX950-SDAG-LABEL: buffer_load_lds_dwordx3_vaddr_saddr:
+; GFX950-SDAG:       ; %bb.0:
+; GFX950-SDAG-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX950-SDAG-NEXT:    s_mov_b32 m0, s5
+; GFX950-SDAG-NEXT:    s_nop 0
+; GFX950-SDAG-NEXT:    buffer_load_dwordx3 v0, s[0:3], 0 offen offset:16 sc0 
lds
+; GFX950-SDAG-NEXT:    s_endpgm
+;
+; GFX950-GISEL-LABEL: buffer_load_lds_dwordx3_vaddr_saddr:
+; GFX950-GISEL:       ; %bb.0:
+; GFX950-GISEL-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX950-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX950-GISEL-NEXT:    s_nop 0
+; GFX950-GISEL-NEXT:    buffer_load_dwordx3 v0, s[0:3], 0 offen offset:16 sc0 
lds
+; GFX950-GISEL-NEXT:    s_endpgm
   %gptr.off = getelementptr i8, ptr addrspace(7) %gptr, i32 %off
   call void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7) %gptr.off, ptr 
addrspace(3) %lptr, i32 12, i32 16, i32 1)
   ret void
@@ -54,6 +82,19 @@ define amdgpu_ps void 
@global_load_lds_dwordx4_vaddr_saddr(ptr addrspace(1) noca
 ; GFX950-NEXT:    s_nop 0
 ; GFX950-NEXT:    global_load_lds_dwordx4 v[0:1], off offset:16 sc0
 ; GFX950-NEXT:    s_endpgm
+; GFX950-SDAG-LABEL: global_load_lds_dwordx4_vaddr_saddr:
+; GFX950-SDAG:       ; %bb.0:
+; GFX950-SDAG-NEXT:    s_mov_b32 m0, s0
+; GFX950-SDAG-NEXT:    s_nop 0
+; GFX950-SDAG-NEXT:    global_load_lds_dwordx4 v[0:1], off offset:16 sc0
+; GFX950-SDAG-NEXT:    s_endpgm
+;
+; GFX950-GISEL-LABEL: global_load_lds_dwordx4_vaddr_saddr:
+; GFX950-GISEL:       ; %bb.0:
+; GFX950-GISEL-NEXT:    s_mov_b32 m0, s0
+; GFX950-GISEL-NEXT:    s_nop 0
+; GFX950-GISEL-NEXT:    global_load_lds_dwordx4 v[0:1], off offset:16 sc0
+; GFX950-GISEL-NEXT:    s_endpgm
   call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %gptr, ptr 
addrspace(3) %lptr, i32 16, i32 16, i32 1)
   ret void
 }
@@ -66,10 +107,22 @@ define amdgpu_ps void 
@buffer_load_lds_dwordx4_vaddr_saddr(ptr addrspace(7) noca
 ; GFX950-NEXT:    s_nop 0
 ; GFX950-NEXT:    buffer_load_dwordx4 v0, s[0:3], 0 offen offset:16 sc0 lds
 ; GFX950-NEXT:    s_endpgm
+; GFX950-SDAG-LABEL: buffer_load_lds_dwordx4_vaddr_saddr:
+; GFX950-SDAG:       ; %bb.0:
+; GFX950-SDAG-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX950-SDAG-NEXT:    s_mov_b32 m0, s5
+; GFX950-SDAG-NEXT:    s_nop 0
+; GFX950-SDAG-NEXT:    buffer_load_dwordx4 v0, s[0:3], 0 offen offset:16 sc0 
lds
+; GFX950-SDAG-NEXT:    s_endpgm
+;
+; GFX950-GISEL-LABEL: buffer_load_lds_dwordx4_vaddr_saddr:
+; GFX950-GISEL:       ; %bb.0:
+; GFX950-GISEL-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX950-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX950-GISEL-NEXT:    s_nop 0
+; GFX950-GISEL-NEXT:    buffer_load_dwordx4 v0, s[0:3], 0 offen offset:16 sc0 
lds
+; GFX950-GISEL-NEXT:    s_endpgm
   %gptr.off = getelementptr i8, ptr addrspace(7) %gptr, i32 %off
   call void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7) %gptr.off, ptr 
addrspace(3) %lptr, i32 16, i32 16, i32 1)
   ret void
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add 
tests below this line:
-; GFX950-GISEL: {{.*}}
-; GFX950-SDAG: {{.*}}

>From 5afd162d25ca7a56ccb9f30b7aa99e61c9847acf Mon Sep 17 00:00:00 2001
From: Krzysztof Drewniak <krzysztof.drewn...@amd.com>
Date: Thu, 1 May 2025 19:59:18 +0000
Subject: [PATCH 3/4] Add HIP tests, plumb the intrinsic through
 infer-address-spaces

---
 clang/test/CodeGenHIP/amdgpu-load-to-lds.hip  | 48 +++++++++++++++++++
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      |  4 +-
 .../AMDGPU/AMDGPUTargetTransformInfo.cpp      | 10 ++++
 .../AMDGPU/mem-intrinsics.ll                  | 23 +++++++++
 4 files changed, 84 insertions(+), 1 deletion(-)
 create mode 100644 clang/test/CodeGenHIP/amdgpu-load-to-lds.hip

diff --git a/clang/test/CodeGenHIP/amdgpu-load-to-lds.hip 
b/clang/test/CodeGenHIP/amdgpu-load-to-lds.hip
new file mode 100644
index 0000000000000..b7c60bc239d2e
--- /dev/null
+++ b/clang/test/CodeGenHIP/amdgpu-load-to-lds.hip
@@ -0,0 +1,48 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py 
UTC_ARGS: --version 5
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx950 -emit-llvm 
-fcuda-is-device -o - %s | FileCheck %s
+
+// COM: Most tests are in the OpenCL semantics tests; this is just a verification 
for HIP
+
+#define __device__ __attribute__((device))
+#define __shared__ __attribute__((shared))
+
+typedef unsigned int u32;
+
+// CHECK-LABEL: define dso_local void @_Z20test_load_to_lds_u32PjS_(
+// CHECK-SAME: ptr noundef [[SRC:%.*]], ptr noundef [[DST:%.*]]) 
#[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[DST_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[SRC_ADDR]] to ptr
+// CHECK-NEXT:    [[DST_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[DST_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[SRC]], ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store ptr [[DST]], ptr [[DST_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[DST_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr [[TMP1]] to ptr addrspace(3)
+// CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p0(ptr [[TMP0]], ptr 
addrspace(3) [[TMP2]], i32 4, i32 0, i32 0)
+// CHECK-NEXT:    ret void
+//
+__device__ void test_load_to_lds_u32(u32* src, __shared__ u32 *dst) {
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/4, /*offset=*/0, /*aux=*/0);
+}
+
+// CHECK-LABEL: define dso_local void @_Z20test_load_to_lds_128PvS_(
+// CHECK-SAME: ptr noundef [[SRC:%.*]], ptr noundef [[DST:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[DST_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[SRC_ADDR]] to ptr
+// CHECK-NEXT:    [[DST_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[DST_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[SRC]], ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store ptr [[DST]], ptr [[DST_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[DST_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr [[TMP1]] to ptr addrspace(3)
+// CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p0(ptr [[TMP0]], ptr 
addrspace(3) [[TMP2]], i32 16, i32 0, i32 0)
+// CHECK-NEXT:    ret void
+//
+__device__ void test_load_to_lds_128(void* src, __shared__ void *dst) {
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/16, /*offset=*/0, /*aux=*/0);
+}
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td 
b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 7939ef0cf8620..0e5d48ee8f567 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2656,7 +2656,9 @@ class AMDGPULoadToLDS :
      llvm_i32_ty],                      // auxiliary data (imm, cachepolicy 
(bit 0 = sc0,
                                         //                                   
bit 1 = sc1,
                                         //                                   
bit 4 = scc))
-    [IntrWillReturn, NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
+    [IntrWillReturn, IntrArgMemOnly,
+     NoCapture<ArgIndex<0>>, ReadOnly<ArgIndex<0>>,
+     NoCapture<ArgIndex<1>>, WriteOnly<ArgIndex<1>>,
      ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, 
IntrNoCallback, IntrNoFree],
      "", [SDNPMemOperand]>;
 def int_amdgcn_load_to_lds : AMDGPULoadToLDS;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp 
b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 204d3df546bbf..88b0a4b03345c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -1043,6 +1043,7 @@ bool 
GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
   case Intrinsic::amdgcn_is_private:
   case Intrinsic::amdgcn_flat_atomic_fmax_num:
   case Intrinsic::amdgcn_flat_atomic_fmin_num:
+  case Intrinsic::amdgcn_load_to_lds:
     OpIndexes.push_back(0);
     return true;
   default:
@@ -1114,6 +1115,15 @@ Value 
*GCNTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
     II->setCalledFunction(NewDecl);
     return II;
   }
+  case Intrinsic::amdgcn_load_to_lds: {
+    Type *SrcTy = NewV->getType();
+    Module *M = II->getModule();
+    Function *NewDecl =
+        Intrinsic::getOrInsertDeclaration(M, II->getIntrinsicID(), {SrcTy});
+    II->setArgOperand(0, NewV);
+    II->setCalledFunction(NewDecl);
+    return II;
+  }
   default:
     return nullptr;
   }
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll 
b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll
index 48becdeba1c6a..9f67859a3f147 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll
@@ -170,11 +170,34 @@ define amdgpu_kernel void 
@memmove_flat_to_flat_replace_src_with_group(ptr %dest
   ret void
 }
 
+define amdgpu_kernel void @load_to_lds_global_as_flat(ptr addrspace(1) 
%global.ptr, ptr addrspace(3) %group.ptr) #0 {
+; CHECK-LABEL: define amdgpu_kernel void @load_to_lds_global_as_flat(
+; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], ptr addrspace(3) 
[[GROUP_PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) 
[[GLOBAL_PTR]], ptr addrspace(3) [[GROUP_PTR]], i32 4, i32 0, i32 0)
+; CHECK-NEXT:    ret void
+;
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  call void @llvm.amdgcn.load.to.lds.p0(ptr %cast, ptr addrspace(3) 
%group.ptr, i32 4, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_kernel void @load_to_lds_fat_pointer_as_flat(ptr addrspace(7) 
%buffer.fat.ptr, ptr addrspace(3) %group.ptr) #0 {
+; CHECK-LABEL: define amdgpu_kernel void @load_to_lds_fat_pointer_as_flat(
+; CHECK-SAME: ptr addrspace(7) [[BUFFER_FAT_PTR:%.*]], ptr addrspace(3) 
[[GROUP_PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p7(ptr addrspace(7) 
[[BUFFER_FAT_PTR]], ptr addrspace(3) [[GROUP_PTR]], i32 4, i32 0, i32 0)
+; CHECK-NEXT:    ret void
+;
+  %cast = addrspacecast ptr addrspace(7) %buffer.fat.ptr to ptr
+  call void @llvm.amdgcn.load.to.lds.p0(ptr %cast, ptr addrspace(3) 
%group.ptr, i32 4, i32 0, i32 0)
+  ret void
+}
+
 declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #1
 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture 
readonly, i64, i1) #1
 declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture writeonly, ptr 
nocapture readonly, i64, i1) #1
 declare void @llvm.memcpy.p0.p3.i32(ptr nocapture writeonly, ptr addrspace(3) 
nocapture readonly, i32, i1) #1
 declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture 
readonly, i64, i1) #1
+declare void @llvm.amdgcn.load.to.lds.p0(ptr nocapture readonly, ptr 
addrspace(3) nocapture writeonly, i32 immarg, i32 immarg, i32 immarg) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

>From 62ef0e4eb38c307b68efc5be199a35a410930325 Mon Sep 17 00:00:00 2001
From: Krzysztof Drewniak <krzysztof.drewn...@amd.com>
Date: Fri, 2 May 2025 17:46:24 +0000
Subject: [PATCH 4/4] Add extra test per review comments

---
 clang/test/CodeGenHIP/amdgpu-load-to-lds.hip | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/clang/test/CodeGenHIP/amdgpu-load-to-lds.hip 
b/clang/test/CodeGenHIP/amdgpu-load-to-lds.hip
index b7c60bc239d2e..095c20268f2ec 100644
--- a/clang/test/CodeGenHIP/amdgpu-load-to-lds.hip
+++ b/clang/test/CodeGenHIP/amdgpu-load-to-lds.hip
@@ -28,6 +28,25 @@ __device__ void test_load_to_lds_u32(u32* src, __shared__ 
u32 *dst) {
   __builtin_amdgcn_load_to_lds(src, dst, /*size=*/4, /*offset=*/0, /*aux=*/0);
 }
 
+// CHECK-LABEL: define dso_local void @_Z30test_load_to_lds_u32_flat_destPjS_(
+// CHECK-SAME: ptr noundef [[SRC:%.*]], ptr noundef [[DST:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[DST_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[SRC_ADDR]] to ptr
+// CHECK-NEXT:    [[DST_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[DST_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[SRC]], ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store ptr [[DST]], ptr [[DST_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[DST_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr [[TMP1]] to ptr addrspace(3)
+// CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p0(ptr [[TMP0]], ptr 
addrspace(3) [[TMP2]], i32 4, i32 0, i32 0)
+// CHECK-NEXT:    ret void
+//
+__device__ void test_load_to_lds_u32_flat_dest(u32* src, u32 *dst) {
+  __builtin_amdgcn_load_to_lds(src, dst, /*size=*/4, /*offset=*/0, /*aux=*/0);
+}
+
 // CHECK-LABEL: define dso_local void @_Z20test_load_to_lds_128PvS_(
 // CHECK-SAME: ptr noundef [[SRC:%.*]], ptr noundef [[DST:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to