yaxunl updated this revision to Diff 142422.
yaxunl marked an inline comment as done.
yaxunl added a comment.
Revised per John's comments.
https://reviews.llvm.org/D44984
Files:
include/clang/Basic/LangOptions.def
lib/CodeGen/CGCUDANV.cpp
lib/CodeGen/CGDecl.cpp
lib/CodeGen/CodeGenModule.cpp
lib/Frontend/CompilerInvocation.cpp
lib/Frontend/InitPreprocessor.cpp
lib/Sema/SemaCUDA.cpp
lib/Sema/SemaDecl.cpp
test/CodeGenCUDA/device-stub.cu
Index: test/CodeGenCUDA/device-stub.cu
===================================================================
--- test/CodeGenCUDA/device-stub.cu
+++ test/CodeGenCUDA/device-stub.cu
@@ -1,8 +1,11 @@
// RUN: echo "GPU binary would be here" > %t
-// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -fcuda-include-gpubinary %t -o - | FileCheck %s
-// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -fcuda-include-gpubinary %t -o - -DNOGLOBALS \
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \
+// RUN: -fcuda-include-gpubinary %t -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \
+// RUN: -fcuda-include-gpubinary %t -o - -DNOGLOBALS \
// RUN: | FileCheck %s -check-prefix=NOGLOBALS
-// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s -check-prefix=NOGPUBIN
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - \
+// RUN: | FileCheck %s -check-prefix=NOGPUBIN
#include "Inputs/cuda.h"
@@ -77,10 +80,14 @@
// Test that we've built a function to register kernels and global vars.
// CHECK: define internal void @__cuda_register_globals
// CHECK: call{{.*}}cudaRegisterFunction(i8** %0, {{.*}}kernelfunc
-// CHECK-DAG: call{{.*}}cudaRegisterVar(i8** %0, {{.*}}device_var{{.*}}i32 0, i32 4, i32 0, i32 0
-// CHECK-DAG: call{{.*}}cudaRegisterVar(i8** %0, {{.*}}constant_var{{.*}}i32 0, i32 4, i32 1, i32 0
-// CHECK-DAG: call{{.*}}cudaRegisterVar(i8** %0, {{.*}}ext_device_var{{.*}}i32 1, i32 4, i32 0, i32 0
-// CHECK-DAG: call{{.*}}cudaRegisterVar(i8** %0, {{.*}}ext_constant_var{{.*}}i32 1, i32 4, i32 1, i32 0
+// CHECK-DAG: call{{.*}}cudaRegisterVar(i8** %0, {{.*}}device_var{{.*}}
+// CHECK-DAG-SAME: i32 0, i32 4, i32 0, i32 0
+// CHECK-DAG: call{{.*}}cudaRegisterVar(i8** %0, {{.*}}constant_var{{.*}}
+// CHECK-DAG-SAME: i32 0, i32 4, i32 1, i32 0
+// CHECK-DAG: call{{.*}}cudaRegisterVar(i8** %0, {{.*}}ext_device_var{{.*}}
+// CHECK-DAG-SAME: i32 1, i32 4, i32 0, i32 0
+// CHECK-DAG: call{{.*}}cudaRegisterVar(i8** %0, {{.*}}ext_constant_var{{.*}}
+// CHECK-DAG-SAME: i32 1, i32 4, i32 1, i32 0
// CHECK: ret void
// Test that we've built the constructor.
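For reference, the registration call those CHECK-DAG lines match is modeled later in this patch (CGCUDANV.cpp) as returning int; a sketch of its shape, with parameter names assumed rather than taken from the patch:

    // Hypothetical prototype; the trailing four ints are what the
    // CHECK-DAG-SAME lines above pin down (ext, size, constant, global).
    int __cudaRegisterVar(void **fatbinHandle, char *hostVar,
                          char *deviceAddress, const char *deviceName,
                          int ext, int size, int constant, int global);

So, e.g., device_var registers with ext=0, size=4, constant=0, global=0, while ext_constant_var registers with ext=1, size=4, constant=1, global=0.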
Index: lib/Sema/SemaDecl.cpp
===================================================================
--- lib/Sema/SemaDecl.cpp
+++ lib/Sema/SemaDecl.cpp
@@ -9048,11 +9048,13 @@
if (getLangOpts().CUDA) {
IdentifierInfo *II = NewFD->getIdentifier();
- if (II && II->isStr("cudaConfigureCall") && !NewFD->isInvalidDecl() &&
+ if (II &&
+ ((getLangOpts().HIP && II->isStr("hipConfigureCall")) ||
+ (!getLangOpts().HIP && II->isStr("cudaConfigureCall"))) &&
+ !NewFD->isInvalidDecl() &&
NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
if (!R->getAs<FunctionType>()->getReturnType()->isScalarType())
Diag(NewFD->getLocation(), diag::err_config_scalar_return);
-
Context.setcudaConfigureCallDecl(NewFD);
}
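In HIP mode, Sema now latches onto hipConfigureCall rather than cudaConfigureCall as the launch-configuration function. A sketch of the kind of declaration it would recognize (hypothetical; the real declaration comes from the HIP wrapper headers, and only its scalar return type is checked here):

    extern "C" hipError_t hipConfigureCall(dim3 gridDim, dim3 blockDim,
                                           size_t sharedMem, hipStream_t stream);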
Index: lib/Sema/SemaCUDA.cpp
===================================================================
--- lib/Sema/SemaCUDA.cpp
+++ lib/Sema/SemaCUDA.cpp
@@ -42,8 +42,9 @@
SourceLocation GGGLoc) {
FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
if (!ConfigDecl)
- return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
- << "cudaConfigureCall");
+ return ExprError(
+ Diag(LLLLoc, diag::err_undeclared_var_use)
+ << (getLangOpts().HIP ? "hipConfigureCall" : "cudaConfigureCall"));
QualType ConfigQTy = ConfigDecl->getType();
DeclRefExpr *ConfigDR = new (Context)
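This keeps the diagnostic consistent with the mode: launching a kernel with no configuration function in scope is now reported against the HIP name under -x hip. A sketch of code that would trigger it (assuming no wrapper header is included):

    __global__ void kern();
    void host() { kern<<<1, 1>>>(); } // diagnosed against 'hipConfigureCall'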
Index: lib/Frontend/InitPreprocessor.cpp
===================================================================
--- lib/Frontend/InitPreprocessor.cpp
+++ lib/Frontend/InitPreprocessor.cpp
@@ -465,6 +465,8 @@
Builder.defineMacro("__ASSEMBLER__");
if (LangOpts.CUDA)
Builder.defineMacro("__CUDA__");
+ if (LangOpts.HIP)
+ Builder.defineMacro("__HIP__");
}
/// Initialize the predefined C++ language feature test macros defined in
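Because HIP reuses the CUDA language mode in this patch, __CUDA__ stays defined alongside __HIP__; code that must distinguish the two can test __HIP__ first, e.g.:

    #if defined(__HIP__)
      // HIP compilation (__CUDA__ is also defined in this mode)
    #elif defined(__CUDA__)
      // plain CUDA compilation
    #endif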
Index: lib/Frontend/CompilerInvocation.cpp
===================================================================
--- lib/Frontend/CompilerInvocation.cpp
+++ lib/Frontend/CompilerInvocation.cpp
@@ -1564,6 +1564,7 @@
.Case("c", InputKind::C)
.Case("cl", InputKind::OpenCL)
.Case("cuda", InputKind::CUDA)
+ .Case("hip", InputKind::CUDA)
.Case("c++", InputKind::CXX)
.Case("objective-c", InputKind::ObjC)
.Case("objective-c++", InputKind::ObjCXX)
@@ -2075,6 +2076,11 @@
Opts.IncludeDefaultHeader = Args.hasArg(OPT_finclude_default_header);
+ if (const Arg *A = Args.getLastArg(OPT_x)) {
+ if (!strcmp(A->getValue(), "hip"))
+ Opts.HIP = true;
+ }
+
llvm::Triple T(TargetOpts.Triple);
CompilerInvocation::setLangDefaults(Opts, IK, T, PPOpts, LangStd);
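Together these two hunks let -x hip drive the existing CUDA pipeline while setting LangOpts.HIP. A sketch of a RUN line exercising it, in the style of the test above (the triple is an assumption):

    // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -x hip -emit-llvm %s -o -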
Index: lib/CodeGen/CodeGenModule.cpp
===================================================================
--- lib/CodeGen/CodeGenModule.cpp
+++ lib/CodeGen/CodeGenModule.cpp
@@ -3608,6 +3608,9 @@
MaybeHandleStaticInExternC(D, Fn);
+ if ((getTriple().getArch() == llvm::Triple::amdgcn) &&
+ D->hasAttr<CUDAGlobalAttr>())
+ Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
maybeSetTrivialComdat(*D, *Fn);
CodeGenFunction(*this).GenerateCode(D, Fn, FI);
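On amdgcn the visible effect is that __global__ functions are emitted with the AMDGPU kernel calling convention; in the FileCheck idiom used elsewhere in this patch, roughly (kernel name assumed):

    // CHECK: define amdgpu_kernel void @kernelfunc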
Index: lib/CodeGen/CGDecl.cpp
===================================================================
--- lib/CodeGen/CGDecl.cpp
+++ lib/CodeGen/CGDecl.cpp
@@ -407,6 +407,12 @@
if (D.getInit() && !isCudaSharedVar)
var = AddInitializerToStaticVarDecl(D, var);
+ // The AMDGPU BE does not support zeroinitializer for AS3
+ if (isCudaSharedVar && (getContext().getTargetInfo().getTriple().getArch() ==
+ llvm::Triple::amdgcn))
+ var->setInitializer(
+ llvm::UndefValue::get(var->getType()->getElementType()));
+
var->setAlignment(alignment.getQuantity());
if (D.hasAttr<AnnotateAttr>())
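This applies to function-scope __shared__ variables, which are emitted as addrspace(3) globals. Since the AMDGPU backend does not support a zeroinitializer there, the initializer is dropped to undef; a sketch of the source pattern and the rough IR it now produces on amdgcn (names assumed):

    __global__ void kern() { __shared__ int buf[64]; }
    ; @kern.buf = internal addrspace(3) global [64 x i32] undef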
Index: lib/CodeGen/CGCUDANV.cpp
===================================================================
--- lib/CodeGen/CGCUDANV.cpp
+++ lib/CodeGen/CGCUDANV.cpp
@@ -48,6 +48,9 @@
llvm::Constant *getSetupArgumentFn() const;
llvm::Constant *getLaunchFn() const;
+ std::string addPrefixToName(CodeGenModule &CGM, StringRef FuncName) const;
+ std::string addUnderscoredPrefixToName(CodeGenModule &CGM,
+ StringRef FuncName) const;
/// Creates a function to register all kernel stubs generated in this module.
llvm::Function *makeRegisterGlobalsFn();
@@ -91,6 +94,20 @@
}
+std::string CGNVCUDARuntime::addPrefixToName(CodeGenModule &CGM,
+ StringRef FuncName) const {
+ if (CGM.getLangOpts().HIP)
+ return ((Twine("hip") + Twine(FuncName)).str());
+ return ((Twine("cuda") + Twine(FuncName)).str());
+}
+std::string
+CGNVCUDARuntime::addUnderscoredPrefixToName(CodeGenModule &CGM,
+ StringRef FuncName) const {
+ if (CGM.getLangOpts().HIP)
+ return ((Twine("__hip") + Twine(FuncName)).str());
+ return ((Twine("__cuda") + Twine(FuncName)).str());
+}
+
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
: CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
TheModule(CGM.getModule()) {
@@ -109,15 +126,21 @@
llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const {
// cudaError_t cudaSetupArgument(void *, size_t, size_t)
llvm::Type *Params[] = {VoidPtrTy, SizeTy, SizeTy};
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy,
- Params, false),
- "cudaSetupArgument");
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, Params, false),
+ addPrefixToName(CGM, "SetupArgument"));
}
llvm::Constant *CGNVCUDARuntime::getLaunchFn() const {
- // cudaError_t cudaLaunch(char *)
- return CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
+ if (CGM.getLangOpts().HIP) {
+ // hipError_t hipLaunchByPtr(char *);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, CharPtrTy, false), "hipLaunchByPtr");
+ } else {
+ // cudaError_t cudaLaunch(char *);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
+ }
}
void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
@@ -182,7 +205,8 @@
llvm::Function *RegisterKernelsFunc = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
- llvm::GlobalValue::InternalLinkage, "__cuda_register_globals", &TheModule);
+ llvm::GlobalValue::InternalLinkage,
+ addUnderscoredPrefixToName(CGM, "_register_globals"), &TheModule);
llvm::BasicBlock *EntryBB =
llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
CGBuilderTy Builder(CGM, Context);
@@ -195,7 +219,7 @@
VoidPtrTy, VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
llvm::Constant *RegisterFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
- "__cudaRegisterFunction");
+ addUnderscoredPrefixToName(CGM, "RegisterFunction"));
// Extract GpuBinaryHandle passed as the first argument passed to
// __cuda_register_globals() and generate __cudaRegisterFunction() call for
@@ -219,7 +243,7 @@
IntTy, IntTy};
llvm::Constant *RegisterVar = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterVarParams, false),
- "__cudaRegisterVar");
+ addUnderscoredPrefixToName(CGM, "RegisterVar"));
for (auto &Pair : DeviceVars) {
llvm::GlobalVariable *Var = Pair.first;
unsigned Flags = Pair.second;
@@ -260,7 +284,7 @@
// void ** __cudaRegisterFatBinary(void *);
llvm::Constant *RegisterFatbinFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
- "__cudaRegisterFatBinary");
+ addUnderscoredPrefixToName(CGM, "RegisterFatBinary"));
// struct { int magic, int version, void * gpu_binary, void * dont_care };
llvm::StructType *FatbinWrapperTy =
llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy);
@@ -279,7 +303,8 @@
llvm::Function *ModuleCtorFunc = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
- llvm::GlobalValue::InternalLinkage, "__cuda_module_ctor", &TheModule);
+ llvm::GlobalValue::InternalLinkage,
+ addUnderscoredPrefixToName(CGM, "_module_ctor"), &TheModule);
llvm::BasicBlock *CtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
CGBuilderTy CtorBuilder(CGM, Context);
@@ -305,16 +330,18 @@
// Unused in fatbin v1.
Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
llvm::GlobalVariable *FatbinWrapper = Values.finishAndCreateGlobal(
- "__cuda_fatbin_wrapper", CGM.getPointerAlign(),
+ addUnderscoredPrefixToName(CGM, "_fatbin_wrapper"), CGM.getPointerAlign(),
/*constant*/ true);
FatbinWrapper->setSection(FatbinSectionName);
// GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
RegisterFatbinFunc, CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
GpuBinaryHandle = new llvm::GlobalVariable(
TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
- llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
+ llvm::ConstantPointerNull::get(VoidPtrPtrTy),
+ addUnderscoredPrefixToName(CGM, "_gpubin_handle"));
+
CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
CGM.getPointerAlign());
@@ -341,11 +368,13 @@
// void __cudaUnregisterFatBinary(void ** handle);
llvm::Constant *UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
- "__cudaUnregisterFatBinary");
+ addUnderscoredPrefixToName(CGM, "UnregisterFatBinary"));
llvm::Function *ModuleDtorFunc = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
- llvm::GlobalValue::InternalLinkage, "__cuda_module_dtor", &TheModule);
+ llvm::GlobalValue::InternalLinkage,
+ addUnderscoredPrefixToName(CGM, "_module_dtor"), &TheModule);
+
llvm::BasicBlock *DtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
CGBuilderTy DtorBuilder(CGM, Context);
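The net effect of the CGCUDANV changes, collecting the renamings above, is that every runtime entry point and generated symbol picks up the hip spelling when LangOpts.HIP is set:

    cudaSetupArgument         -> hipSetupArgument
    cudaLaunch                -> hipLaunchByPtr
    __cudaRegisterFunction    -> __hipRegisterFunction
    __cudaRegisterVar         -> __hipRegisterVar
    __cudaRegisterFatBinary   -> __hipRegisterFatBinary
    __cudaUnregisterFatBinary -> __hipUnregisterFatBinary
    __cuda_register_globals   -> __hip_register_globals
    __cuda_module_ctor        -> __hip_module_ctor
    __cuda_module_dtor        -> __hip_module_dtor
    __cuda_fatbin_wrapper     -> __hip_fatbin_wrapper
    __cuda_gpubin_handle      -> __hip_gpubin_handle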
Index: include/clang/Basic/LangOptions.def
===================================================================
--- include/clang/Basic/LangOptions.def
+++ include/clang/Basic/LangOptions.def
@@ -193,6 +193,7 @@
LANGOPT(NativeHalfArgsAndReturns, 1, 0, "Native half args and returns")
LANGOPT(HalfArgsAndReturns, 1, 0, "half args and returns")
LANGOPT(CUDA , 1, 0, "CUDA")
+LANGOPT(HIP , 1, 0, "HIP")
LANGOPT(OpenMP , 32, 0, "OpenMP support and version of OpenMP (31, 40 or 45)")
LANGOPT(OpenMPSimd , 1, 0, "Use SIMD only OpenMP support.")
LANGOPT(OpenMPUseTLS , 1, 0, "Use TLS for threadprivates or runtime calls")