https://github.com/jhuber6 updated https://github.com/llvm/llvm-project/pull/98234
>From 902690cdfd3108babf0059098932c6b72c493a07 Mon Sep 17 00:00:00 2001
From: Joseph Huber <hube...@outlook.com>
Date: Tue, 9 Jul 2024 17:17:39 -0500
Subject: [PATCH] [Clang] Add `__CLANG_GPU_DISABLE_MATH_WRAPPERS` macro for
 offloading math

Summary:
Currently we replace all math calls with vendor-specific ones. This patch
introduces a macro, `__CLANG_GPU_DISABLE_MATH_WRAPPERS`, that disables this
replacement when it is defined. I went this route instead of a flag for two
reasons. First, I think we have too many flags as it is, and we already have
`-nogpuinc` to disable these wrappers entirely, so this would be a very
specific subset of that. Second, these math headers aren't easily decoupled
from the clang driver layer by simply not including a single header; both the
cmath wrappers and the regular math forward declarations would need to be
disabled as well.

Note: this currently causes errors because the GPU `libm` doesn't have
`powi`; that's an NVIDIA extension I'll add to LLVM libm.
---
 clang/lib/Headers/__clang_cuda_math.h    |  5 +++
 clang/lib/Headers/__clang_hip_math.h     |  5 +++
 clang/test/Headers/gpu_disabled_math.cpp | 41 ++++++++++++++++++++++++
 3 files changed, 51 insertions(+)
 create mode 100644 clang/test/Headers/gpu_disabled_math.cpp

diff --git a/clang/lib/Headers/__clang_cuda_math.h b/clang/lib/Headers/__clang_cuda_math.h
index 04019165068668..44c6e9a4e48d1b 100644
--- a/clang/lib/Headers/__clang_cuda_math.h
+++ b/clang/lib/Headers/__clang_cuda_math.h
@@ -12,6 +12,10 @@
 #error "This file is for CUDA compilation only."
 #endif
 
+// The __CLANG_GPU_DISABLE_MATH_WRAPPERS macro provides a way to let standard
+// libcalls reach the link step instead of being eagerly replaced.
+#ifndef __CLANG_GPU_DISABLE_MATH_WRAPPERS
+
 #ifndef __OPENMP_NVPTX__
 #if CUDA_VERSION < 9000
 #error This file is intended to be used with CUDA-9+ only.
@@ -345,4 +349,5 @@ __DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
 #pragma pop_macro("__DEVICE_VOID__")
 #pragma pop_macro("__FAST_OR_SLOW")
 
+#endif // __CLANG_GPU_DISABLE_MATH_WRAPPERS
 #endif // __CLANG_CUDA_MATH_H__
diff --git a/clang/lib/Headers/__clang_hip_math.h b/clang/lib/Headers/__clang_hip_math.h
index 11e1e7d032586f..8468751d9de260 100644
--- a/clang/lib/Headers/__clang_hip_math.h
+++ b/clang/lib/Headers/__clang_hip_math.h
@@ -13,6 +13,10 @@
 #error "This file is for HIP and OpenMP AMDGCN device compilation only."
 #endif
 
+// The __CLANG_GPU_DISABLE_MATH_WRAPPERS macro provides a way to let standard
+// libcalls reach the link step instead of being eagerly replaced.
+#ifndef __CLANG_GPU_DISABLE_MATH_WRAPPERS
+
 #if !defined(__HIPCC_RTC__)
 #include <limits.h>
 #include <stdint.h>
@@ -1321,4 +1325,5 @@ __host__ inline static int max(int __arg1, int __arg2) {
 #pragma pop_macro("__RETURN_TYPE")
 #pragma pop_macro("__FAST_OR_SLOW")
 
+#endif // __CLANG_GPU_DISABLE_MATH_WRAPPERS
 #endif // __CLANG_HIP_MATH_H__
diff --git a/clang/test/Headers/gpu_disabled_math.cpp b/clang/test/Headers/gpu_disabled_math.cpp
new file mode 100644
index 00000000000000..6e697f52120aeb
--- /dev/null
+++ b/clang/test/Headers/gpu_disabled_math.cpp
@@ -0,0 +1,41 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -include __clang_hip_runtime_wrapper.h \
+// RUN:   -internal-isystem %S/../../lib/Headers/cuda_wrappers \
+// RUN:   -internal-isystem %S/Inputs/include \
+// RUN:   -triple amdgcn-amd-amdhsa -aux-triple x86_64-unknown-unknown \
+// RUN:   -target-cpu gfx906 -emit-llvm %s -fcuda-is-device -o - \
+// RUN:   -D __CLANG_GPU_DISABLE_MATH_WRAPPERS | FileCheck -check-prefix=AMDGPU %s
+
+// RUN: %clang_cc1 -include __clang_cuda_runtime_wrapper.h \
+// RUN:   -internal-isystem %S/../../lib/Headers/cuda_wrappers \
+// RUN:   -internal-isystem %S/Inputs/include \
+// RUN:   -triple nvptx64-nvidia-cuda -aux-triple x86_64-unknown-unknown \
+// RUN:   -target-cpu sm_90 -emit-llvm %s -fcuda-is-device -o - \
+// RUN:   -D __CLANG_GPU_DISABLE_MATH_WRAPPERS | FileCheck -check-prefix=NVPTX %s
+
+extern "C" double sin(double x);
+
+// AMDGPU-LABEL: define dso_local noundef double @_Z3food(
+// AMDGPU-SAME: double noundef [[X:%.*]]) #[[ATTR0:[0-9]+]] {
+// AMDGPU-NEXT:  [[ENTRY:.*:]]
+// AMDGPU-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
+// AMDGPU-NEXT:    [[X_ADDR:%.*]] = alloca double, align 8, addrspace(5)
+// AMDGPU-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// AMDGPU-NEXT:    [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr
+// AMDGPU-NEXT:    store double [[X]], ptr [[X_ADDR_ASCAST]], align 8
+// AMDGPU-NEXT:    [[TMP0:%.*]] = load double, ptr [[X_ADDR_ASCAST]], align 8
+// AMDGPU-NEXT:    [[TMP1:%.*]] = call double @llvm.sin.f64(double [[TMP0]])
+// AMDGPU-NEXT:    ret double [[TMP1]]
+//
+// NVPTX-LABEL: define dso_local noundef double @_Z3food(
+// NVPTX-SAME: double noundef [[X:%.*]]) #[[ATTR0:[0-9]+]] {
+// NVPTX-NEXT:  [[ENTRY:.*:]]
+// NVPTX-NEXT:    [[X_ADDR:%.*]] = alloca double, align 8
+// NVPTX-NEXT:    store double [[X]], ptr [[X_ADDR]], align 8
+// NVPTX-NEXT:    [[TMP0:%.*]] = load double, ptr [[X_ADDR]], align 8
+// NVPTX-NEXT:    [[TMP1:%.*]] = call double @llvm.sin.f64(double [[TMP0]])
+// NVPTX-NEXT:    ret double [[TMP1]]
+//
+double foo(double x) {
+  return sin(x);
+}
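For context, here is a minimal sketch of how the opt-out described in the commit message is meant to be used from a regular offloading compile. The file name, the `__device__` attribute, and the compile line are illustrative assumptions rather than part of the patch; only the macro name, the `-D` define, and the effect (the `sin` call survives as a plain libcall such as `llvm.sin.f64` instead of being rewritten to `__ocml_sin_f64` or `__nv_sin`) follow from the headers and the test above. Since the driver force-includes the math wrapper headers, the macro generally needs to be defined on the command line, as the test does, rather than inside the source file.

  // example.hip -- hypothetical user code, not from this patch.
  extern "C" double sin(double x); // same declaration the test uses

  __device__ double foo(double x) {
    // With -D__CLANG_GPU_DISABLE_MATH_WRAPPERS the wrapper headers are
    // skipped, so this remains a standard math libcall until the GPU libm
    // is linked; without the macro, __clang_hip_math.h / __clang_cuda_math.h
    // redirect it to the vendor implementation.
    return sin(x);
  }

  // Illustrative compile line (only the -D define is prescribed by the patch):
  //   clang++ -x hip --offload-arch=gfx906 -D__CLANG_GPU_DISABLE_MATH_WRAPPERS -c example.hip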