arsenm created this revision.
arsenm added reviewers: yaxunl, AMDGPU.
Herald added subscribers: kosarev, Anastasia, tpr, dstuttard, jvesely, kzhuravl.
Herald added a project: All.
arsenm requested review of this revision.
Herald added a subscriber: wdng.
This makes the scope and ordering arguments to the __builtin_amdgcn_atomic_inc/dec builtins actually do something: instead of being forwarded to the llvm.amdgcn.atomic.inc/dec intrinsics as constant integer operands, they are now translated into the syncscope and atomic ordering of an emitted atomicrmw uinc_wrap/udec_wrap instruction.
Also adds some new OpenCL tests, since the existing HIP tests didn't
cover address spaces.
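For illustration (IR shapes taken from the updated test checks below), a seq_cst/workgroup call such as __builtin_amdgcn_atomic_inc32(p, v, __ATOMIC_SEQ_CST, "workgroup") previously lowered to an intrinsic call carrying the ordering, scope and volatility as constant operands:

  %r = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr %p, i32 %v, i32 7, i32 2, i1 false)

and now lowers to a native atomic instruction:

  %r = atomicrmw uinc_wrap ptr %p, i32 %v syncscope("workgroup") seq_cst, align 4

(%p, %v and %r are placeholder names.)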
https://reviews.llvm.org/D137524
Files:
clang/lib/CodeGen/CGBuiltin.cpp
clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp
clang/test/CodeGenOpenCL/builtins-amdgcn.cl
Index: clang/test/CodeGenOpenCL/builtins-amdgcn.cl
===================================================================
--- clang/test/CodeGenOpenCL/builtins-amdgcn.cl
+++ clang/test/CodeGenOpenCL/builtins-amdgcn.cl
@@ -794,6 +794,26 @@
__builtin_amdgcn_s_setreg(8193, val);
}

+// CHECK-LABEL: @test_atomic_inc_dec(
+void test_atomic_inc_dec(local uint *lptr, global uint *gptr, uint val) {
+  uint res;
+
+  // CHECK: atomicrmw uinc_wrap ptr addrspace(3) %lptr, i32 %val syncscope("workgroup") seq_cst, align 4
+  res = __builtin_amdgcn_atomic_inc32(lptr, val, __ATOMIC_SEQ_CST, "workgroup");
+
+  // CHECK: atomicrmw udec_wrap ptr addrspace(3) %lptr, i32 %val syncscope("workgroup") seq_cst, align 4
+  res = __builtin_amdgcn_atomic_dec32(lptr, val, __ATOMIC_SEQ_CST, "workgroup");
+
+  // CHECK: atomicrmw uinc_wrap ptr addrspace(1) %gptr, i32 %val syncscope("agent") seq_cst, align 4
+  res = __builtin_amdgcn_atomic_inc32(gptr, val, __ATOMIC_SEQ_CST, "agent");
+
+  // CHECK: atomicrmw udec_wrap ptr addrspace(1) %gptr, i32 %val seq_cst, align 4
+  res = __builtin_amdgcn_atomic_dec32(gptr, val, __ATOMIC_SEQ_CST, "");
+
+  // CHECK: atomicrmw volatile udec_wrap ptr addrspace(1) %gptr, i32 %val seq_cst, align 4
+  res = __builtin_amdgcn_atomic_dec32((volatile global uint*)gptr, val, __ATOMIC_SEQ_CST, "");
+}
+
// CHECK-DAG: [[$WI_RANGE]] = !{i32 0, i32 1024}
// CHECK-DAG: [[$WS_RANGE]] = !{i16 1, i16 1025}
// CHECK-DAG: attributes #[[$NOUNWIND_READONLY:[0-9]+]] = { nofree nounwind memory(read) }
Index: clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp
===================================================================
--- clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp
+++ clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 %s -x hip -fcuda-is-device -emit-llvm -O0 -o - \
-// RUN: -triple=amdgcn-amd-amdhsa | opt -S | FileCheck %s
+// RUN: -triple=amdgcn-amd-amdhsa | FileCheck %s

// CHECK-LABEL: @_Z29test_non_volatile_parameter32Pj(
// CHECK-NEXT: entry:
@@ -13,12 +13,12 @@
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
-// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr [[TMP0]], i32 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr [[RES_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
-// CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr [[TMP4]], i32 [[TMP6]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP7]], ptr [[RES_ASCAST]], align 4
// CHECK-NEXT: ret void
//
@@ -39,12 +39,12 @@
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
-// CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr [[TMP0]], i64 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP3]], ptr [[RES_ASCAST]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP5]], align 8
-// CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr [[TMP4]], i64 [[TMP6]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP7]], ptr [[RES_ASCAST]], align 8
// CHECK-NEXT: ret void
//
@@ -65,12 +65,12 @@
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile i32, ptr [[TMP1]], align 4
-// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr [[TMP0]], i32 [[TMP2]], i32 7, i32 2, i1 true)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw volatile uinc_wrap ptr [[TMP0]], i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr [[RES_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile i32, ptr [[TMP5]], align 4
-// CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr [[TMP4]], i32 [[TMP6]], i32 7, i32 2, i1 true)
+// CHECK-NEXT: [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP7]], ptr [[RES_ASCAST]], align 4
// CHECK-NEXT: ret void
//
@@ -91,12 +91,12 @@
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile i64, ptr [[TMP1]], align 8
-// CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr [[TMP0]], i64 [[TMP2]], i32 7, i32 2, i1 true)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw volatile uinc_wrap ptr [[TMP0]], i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP3]], ptr [[RES_ASCAST]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile i64, ptr [[TMP5]], align 8
-// CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr [[TMP4]], i64 [[TMP6]], i32 7, i32 2, i1 true)
+// CHECK-NEXT: [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP7]], ptr [[RES_ASCAST]], align 8
// CHECK-NEXT: ret void
//
@@ -110,10 +110,10 @@
// CHECK-LABEL: @_Z13test_shared32v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
// CHECK-NEXT: ret void
//
@@ -128,10 +128,10 @@
// CHECK-LABEL: @_Z13test_shared64v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
// CHECK-NEXT: ret void
//
@@ -147,10 +147,10 @@
// CHECK-LABEL: @_Z13test_global32v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
-// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
// CHECK-NEXT: ret void
//
@@ -164,10 +164,10 @@
// CHECK-LABEL: @_Z13test_global64v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
-// CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
// CHECK-NEXT: ret void
//
@@ -183,10 +183,10 @@
// CHECK-NEXT: [[LOCAL_VAL:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[LOCAL_VAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCAL_VAL]] to ptr
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP1]], ptr [[LOCAL_VAL_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), align 4
-// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr [[LOCAL_VAL_ASCAST]], align 4
// CHECK-NEXT: ret void
//
@@ -204,10 +204,10 @@
// CHECK-NEXT: [[LOCAL_VAL:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[LOCAL_VAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCAL_VAL]] to ptr
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP1]], ptr [[LOCAL_VAL_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), align 8
-// CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP3]], ptr [[LOCAL_VAL_ASCAST]], align 8
// CHECK-NEXT: ret void
//
@@ -222,22 +222,22 @@
// CHECK-LABEL: @_Z12test_order32v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP0]], i32 2, i32 2, i1 false)
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP0]] syncscope("workgroup") monotonic, align 4
// CHECK-NEXT: store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP2]], i32 4, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") acquire, align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP4]], i32 4, i32 2, i1 false)
+// CHECK-NEXT: [[TMP5:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP4]] syncscope("workgroup") acquire, align 4
// CHECK-NEXT: store i32 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP6]], i32 5, i32 2, i1 false)
+// CHECK-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP6]] syncscope("workgroup") release, align 4
// CHECK-NEXT: store i32 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP8]], i32 6, i32 2, i1 false)
+// CHECK-NEXT: [[TMP9:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP8]] syncscope("workgroup") acq_rel, align 4
// CHECK-NEXT: store i32 [[TMP9]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP10]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP10]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP11]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT: ret void
//
@@ -260,22 +260,22 @@
// CHECK-LABEL: @_Z12test_order64v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP0]], i32 2, i32 2, i1 false)
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP0]] syncscope("workgroup") monotonic, align 8
// CHECK-NEXT: store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP2]], i32 4, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") acquire, align 8
// CHECK-NEXT: store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP4]], i32 4, i32 2, i1 false)
+// CHECK-NEXT: [[TMP5:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP4]] syncscope("workgroup") acquire, align 8
// CHECK-NEXT: store i64 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP6]], i32 5, i32 2, i1 false)
+// CHECK-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP6]] syncscope("workgroup") release, align 8
// CHECK-NEXT: store i64 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP8]], i32 6, i32 2, i1 false)
+// CHECK-NEXT: [[TMP9:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP8]] syncscope("workgroup") acq_rel, align 8
// CHECK-NEXT: store i64 [[TMP9]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP10]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP10]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP11]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT: ret void
//
@@ -298,16 +298,16 @@
// CHECK-LABEL: @_Z12test_scope32v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP0]], i32 7, i32 1, i1 false)
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP0]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP4]], i32 7, i32 3, i1 false)
+// CHECK-NEXT: [[TMP5:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP4]] syncscope("agent") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
-// CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP6]], i32 7, i32 4, i1 false)
+// CHECK-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP6]] syncscope("wavefront") seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT: ret void
//
@@ -326,16 +326,16 @@
// CHECK-LABEL: @_Z12test_scope64v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP0]], i32 7, i32 1, i1 false)
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP0]] seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP4]], i32 7, i32 3, i1 false)
+// CHECK-NEXT: [[TMP5:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP4]] syncscope("agent") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
-// CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP6]], i32 7, i32 4, i1 false)
+// CHECK-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP6]] syncscope("wavefront") seq_cst, align 8
// CHECK-NEXT: store i64 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT: ret void
//
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -17264,40 +17264,33 @@
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
-    unsigned BuiltinAtomicOp;
-    llvm::Type *ResultType = ConvertType(E->getType());
-
+    llvm::AtomicRMWInst::BinOp BinOp;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
    case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
-      BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
+      BinOp = llvm::AtomicRMWInst::UIncWrap;
      break;
    case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
    case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
-      BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
+      BinOp = llvm::AtomicRMWInst::UDecWrap;
      break;
    }

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));

-    llvm::Function *F =
-        CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
-
    ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
                            EmitScalarExpr(E->getArg(3)), AO, SSID);

-    // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expects ordering and
-    // scope as unsigned values
-    Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
-    Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
-
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
-        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
-    Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
+        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

-    return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
+    llvm::AtomicRMWInst *RMW =
+        Builder.CreateAtomicRMW(BinOp, Ptr, Val, AO, SSID);
+    if (Volatile)
+      RMW->setVolatile(true);
+    return RMW;
  }
  case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtn:
  case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtnl: {
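A note on the scope argument, as exercised by the updated tests: the string is translated to an IR sync scope, and an empty string selects system scope, which emits no syncscope on the instruction. Illustrative usage (p and v stand for any suitable pointer and value):

  __builtin_amdgcn_atomic_inc32(p, v, __ATOMIC_SEQ_CST, "workgroup"); // syncscope("workgroup")
  __builtin_amdgcn_atomic_dec32(p, v, __ATOMIC_SEQ_CST, "agent");     // syncscope("agent")
  __builtin_amdgcn_atomic_dec32(p, v, __ATOMIC_SEQ_CST, "wavefront"); // syncscope("wavefront")
  __builtin_amdgcn_atomic_dec32(p, v, __ATOMIC_SEQ_CST, "");          // system scope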