gandhi21299 updated this revision to Diff 362433.
gandhi21299 added a comment.

- added more tests with supported combinations of address spaces (see the usage sketch below)
- addressed minor review nits
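
For reference, a minimal usage sketch (mirroring test_global_add from the new
test file; the trailing argument is the memory order):

  kernel void test_global_add(__global double *addr, double x) {
    __builtin_amdgcn_global_atomic_fadd_f64(addr, x, memory_order_relaxed);
  }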


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D106909/new/

https://reviews.llvm.org/D106909

Files:
  clang/include/clang/Basic/BuiltinsAMDGPU.def
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/test/CodeGenOpenCL/builtins-fp-atomics.cl

Index: clang/test/CodeGenOpenCL/builtins-fp-atomics.cl
===================================================================
--- /dev/null
+++ clang/test/CodeGenOpenCL/builtins-fp-atomics.cl
@@ -0,0 +1,210 @@
+// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-amd-amdhsa -target-cpu gfx90a \
+// RUN:   %s -S -emit-llvm -o - | FileCheck %s -check-prefix=CHECK
+
+// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-amd-amdhsa -target-cpu gfx90a \
+// RUN:   -S -o - %s | FileCheck -check-prefix=GFX90A %s
+
+
+typedef enum memory_order {
+  memory_order_relaxed = __ATOMIC_RELAXED,
+  memory_order_acquire = __ATOMIC_ACQUIRE,
+  memory_order_release = __ATOMIC_RELEASE,
+  memory_order_acq_rel = __ATOMIC_ACQ_REL,
+  memory_order_seq_cst = __ATOMIC_SEQ_CST
+} memory_order;
+
+typedef half __attribute__((ext_vector_type(2))) half2;
+
+// CHECK-LABEL: test_global_add
+// CHECK: tail call double @llvm.amdgcn.global.atomic.fadd.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_global_add
+// GFX90A:  global_atomic_add_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_global_add(__global double *addr, double x) {
+  __builtin_amdgcn_global_atomic_fadd_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_global_constant_add
+// CHECK: tail call double @llvm.amdgcn.global.atomic.fadd.f64.p4f64.f64(double addrspace(4)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_global_constant_add
+// GFX90A:  global_atomic_add_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_global_constant_add(__constant double *addr, double x) {
+  __builtin_amdgcn_global_atomic_fadd_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_global_local_add
+// CHECK: tail call double @llvm.amdgcn.global.atomic.fadd.f64.p3f64.f64(double addrspace(3)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_global_local_add
+// GFX90A:  ds_add_f64
+// GFX90A:  s_endpgm
+kernel void test_global_local_add(__local double *addr, double x) {
+  __builtin_amdgcn_global_atomic_fadd_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_global_addf
+// CHECK: tail call float @llvm.amdgcn.global.atomic.fadd.f32.p1f32.f32(float addrspace(1)* %{{.*}}, float %{{.*}})
+// GFX90A-LABEL: test_global_addf
+// GFX90A: global_atomic_add_f32 v0, v1, s[0:1]
+// GFX90A: s_endpgm
+kernel void test_global_addf(__global float *addr, float x) {
+  __builtin_amdgcn_global_atomic_fadd_f32(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_global_constant_addf
+// CHECK: tail call float @llvm.amdgcn.global.atomic.fadd.f32.p4f32.f32(float addrspace(4)* %{{.*}}, float %{{.*}})
+// GFX90A-LABEL: test_global_constant_addf
+// GFX90A: global_atomic_add_f32 v0, v1, s[0:1]
+// GFX90A: s_endpgm
+kernel void test_global_constant_addf(__constant float *addr, float x) {
+  __builtin_amdgcn_global_atomic_fadd_f32(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_global_local_addf
+// CHECK: tail call float @llvm.amdgcn.global.atomic.fadd.f32.p3f32.f32(float addrspace(3)* %{{.*}}, float %{{.*}})
+// GFX90A-LABEL: test_global_local_addf
+// GFX90A: ds_add_f32
+// GFX90A: s_endpgm
+kernel void test_global_local_addf(__local float *addr, float x) {
+  __builtin_amdgcn_global_atomic_fadd_f32(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_global_add2h
+// CHECK: tail call <2 x half> @llvm.amdgcn.global.atomic.fadd.v2f16.p1v2f16.v2f16(<2 x half> addrspace(1)* %{{.*}}, <2 x half> %{{.*}})
+// GFX90A-LABEL: test_global_add2h
+// GFX90A: global_atomic_pk_add_f16 v0, v1, s[0:1]
+// GFX90A: s_endpgm
+kernel void test_global_add2h(__global half2 *addr, half2 x){
+  __builtin_amdgcn_global_atomic_fadd_2f16(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_global_constant_add2h
+// CHECK: tail call <2 x half> @llvm.amdgcn.global.atomic.fadd.v2f16.p4v2f16.v2f16(<2 x half> addrspace(4)* %{{.*}}, <2 x half> %{{.*}})
+// GFX90A-LABEL: test_global_constant_add2h
+// GFX90A: global_atomic_pk_add_f16
+kernel void test_global_constant_add2h(__constant half2 *addr, half2 x){
+  __builtin_amdgcn_global_atomic_fadd_2f16(addr, x, memory_order_relaxed);
+}
+
+// Disabled: the backend cannot yet select a v2f16 atomic fadd on a __local
+// pointer (fatal error: Cannot select: AtomicLoadFAdd, addrspace 3).
+// kernel void test_global_local_add2h(__local half2 *addr, half2 x){
+//   __builtin_amdgcn_global_atomic_fadd_2f16(addr, x, memory_order_relaxed);
+// }
+
+// CHECK-LABEL: test_global_global_min
+// CHECK: tail call double @llvm.amdgcn.global.atomic.fmin.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL:  test_global_global_min
+// GFX90A:  global_atomic_min_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_global_global_min(__global double *addr, double x){
+  __builtin_amdgcn_global_atomic_fmin_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_global_constant_min
+// CHECK: tail call double @llvm.amdgcn.global.atomic.fmin.f64.p4f64.f64(double addrspace(4)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL:  test_global_constant_min
+// GFX90A:  global_atomic_min_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_global_constant_min(__constant double *addr, double x){
+  __builtin_amdgcn_global_atomic_fmin_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_global_max
+// CHECK: tail call double @llvm.amdgcn.global.atomic.fmax.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_global_max
+// GFX90A:  global_atomic_max_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_global_max(__global double *addr, double x){
+  __builtin_amdgcn_global_atomic_fmax_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_global_constant_max
+// CHECK: tail call double @llvm.amdgcn.global.atomic.fmax.f64.p4f64.f64(double addrspace(4)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_global_constant_max
+// GFX90A:  global_atomic_max_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_global_constant_max(__constant double *addr, double x){
+  __builtin_amdgcn_global_atomic_fmax_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_flat_add_local
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fadd.f64.p3f64.f64(double addrspace(3)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_flat_add_local
+// GFX90A:  ds_add_f64 v2, v[0:1]
+// GFX90A:  s_endpgm
+kernel void test_flat_add_local(__local double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fadd_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_flat_global_add
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fadd.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_flat_global_add
+// GFX90A:  global_atomic_add_f64
+// GFX90A:  s_endpgm
+kernel void test_flat_global_add(__global double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fadd_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_flat_constant_add
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fadd.f64.p4f64.f64(double addrspace(4)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_flat_constant_add
+// GFX90A:  global_atomic_add_f64
+// GFX90A:  s_endpgm
+kernel void test_flat_constant_add(__constant double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fadd_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_flat_min_constant
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fmin.f64.p4f64.f64(double addrspace(4)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_flat_min_constant
+// GFX90A:  global_atomic_min_f64
+// GFX90A:  s_endpgm
+kernel void test_flat_min_constant(__constant double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fmin_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_flat_global_min
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fmin.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_flat_global_min
+// GFX90A:  global_atomic_min_f64
+// GFX90A:  s_endpgm
+kernel void test_flat_global_min(__global double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fmin_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_flat_max_constant
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fmax.f64.p4f64.f64(double addrspace(4)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL:  test_flat_max_constant
+// GFX90A:  global_atomic_max_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_flat_max_constant(__constant double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fmax_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_flat_global_max
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fmax.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL:  test_flat_global_max
+// GFX90A:  global_atomic_max_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_flat_global_max(__global double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fmax_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_ds_add_local
+// CHECK: tail call double @llvm.amdgcn.ds.fadd.f64(double addrspace(3)* %{{.*}}, double %{{.*}},
+// GFX90A-LABEL: test_ds_add_local
+// GFX90A:  ds_add_f64 v2, v[0:1]
+// GFX90A:  s_endpgm
+kernel void test_ds_add_local(__local double *addr, double x){
+  __builtin_amdgcn_ds_atomic_fadd_f64(addr, x, memory_order_relaxed);
+}
+
+// CHECK-LABEL: test_ds_addf_local
+// CHECK: tail call float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* %{{.*}}, float %{{.*}},
+// GFX90A-LABEL:  test_ds_addf_local
+// GFX90A:  ds_add_f32 v0, v1
+// GFX90A:  s_endpgm
+kernel void test_ds_addf_local(__local float *addr, float x){
+  __builtin_amdgcn_ds_atomic_fadd_f32(addr, x, memory_order_relaxed);
+}
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -15826,6 +15826,76 @@
     Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
     return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
   }
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_2f16:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: {
+    Intrinsic::ID IID;
+    llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
+    switch (BuiltinID) {
+    case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
+      ArgTy = llvm::Type::getFloatTy(getLLVMContext());
+      IID = Intrinsic::amdgcn_global_atomic_fadd;
+      break;
+    case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_2f16:
+      ArgTy = llvm::FixedVectorType::get(
+          llvm::Type::getHalfTy(getLLVMContext()), 2);
+      LLVM_FALLTHROUGH;
+    case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
+      IID = Intrinsic::amdgcn_global_atomic_fadd;
+      break;
+    case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
+      IID = Intrinsic::amdgcn_global_atomic_fmin;
+      break;
+    case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
+      IID = Intrinsic::amdgcn_global_atomic_fmax;
+      break;
+    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
+      IID = Intrinsic::amdgcn_flat_atomic_fadd;
+      break;
+    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
+      IID = Intrinsic::amdgcn_flat_atomic_fmin;
+      break;
+    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
+      IID = Intrinsic::amdgcn_flat_atomic_fmax;
+      break;
+    }
+    llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
+    llvm::Value *Val = EmitScalarExpr(E->getArg(1));
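+    // The intrinsic is overloaded on the {result, pointer, value} types, so
+    // the mangled name (e.g. llvm.amdgcn.global.atomic.fadd.f64.p1f64.f64)
+    // reflects the address space of the incoming pointer.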
+    llvm::Function *F =
+        CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()});
+    return Builder.CreateCall(F, {Addr, Val});
+  }
+  case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
+  case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: {
+    Intrinsic::ID IID;
+    llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
+    switch (BuiltinID) {
+    case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
+      ArgTy = llvm::Type::getFloatTy(getLLVMContext());
+      LLVM_FALLTHROUGH;
+    case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
+      IID = Intrinsic::amdgcn_ds_fadd;
+      break;
+    }
+    llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
+    llvm::Value *Val = EmitScalarExpr(E->getArg(1));
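+    // llvm.amdgcn.ds.fadd takes two extra i32 operands (ordering and scope)
+    // and a trailing i1 volatile flag; zeros request the relaxed defaults.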
+    llvm::Constant *ZeroI32 = llvm::ConstantInt::getIntegerValue(
+        llvm::Type::getInt32Ty(getLLVMContext()), APInt(32, 0, true));
+    llvm::Constant *ZeroI1 = llvm::ConstantInt::getIntegerValue(
+        llvm::Type::getInt1Ty(getLLVMContext()), APInt(1, 0));
+    llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy});
+    return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1});
+  }
   case AMDGPU::BI__builtin_amdgcn_read_exec: {
     CallInst *CI = cast<CallInst>(
       EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
Index: clang/include/clang/Basic/BuiltinsAMDGPU.def
===================================================================
--- clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -196,6 +196,21 @@
 
 TARGET_BUILTIN(__builtin_amdgcn_fmed3h, "hhhh", "nc", "gfx9-insts")
 
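+// Type strings read <result><args>: "dd*1di" is double(double addrspace(1)*,
+// double, int), with the trailing int carrying the memory order.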
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_f64, "dd*1di", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_f32, "ff*1fi", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_2f16, "V2hV2h*1V2hi", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fmin_f64, "dd*1di", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fmax_f64, "dd*1di", "t", "gfx90a-insts")
+
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_f64, "dd*1di", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fmin_f64, "dd*1di", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fmax_f64, "dd*1di", "t", "gfx90a-insts")
+
+TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_f64, "dd*3di", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_f32, "ff*3fi", "t", "gfx90a-insts")
+
 //===----------------------------------------------------------------------===//
 // Deep learning builtins.
 //===----------------------------------------------------------------------===//