================
@@ -64,6 +66,27 @@ void CommonSPIRABIInfo::setCCs() {
RuntimeCC = llvm::CallingConv::SPIR_FUNC;
}
+ABIArgInfo SPIRVABIInfo::classifyReturnType(QualType RetTy) const {
+ if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
+ return DefaultABIInfo::classifyReturnType(RetTy);
+ if (!isAggregateTypeForABI(RetTy) || getRecordArgABI(RetTy, getCXXABI()))
+ return DefaultABIInfo::classifyReturnType(RetTy);
+
+ if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return DefaultABIInfo::classifyReturnType(RetTy);
+ }
+
+ // TODO: The AMDGPU ABI is non-trivial to represent in SPIR-V; in order to
+  // avoid encoding various architecture specific bits here we return everything
+  // as direct to retain type info for things like aggregates, for later perusal
+ // when translating back to LLVM/lowering in the BE. This is also why we
+ // disable flattening as the outcomes can mismatch between SPIR-V and AMDGPU.
+ // This will be revisited / optimised in the future.
----------------
arsenm wrote:
byval doesn't really make sense on a kernel as there is no real caller, but
depending on how SPIRV defines its byval attribute, maybe you can codegen LLVM
byref into SPIRV byval?
https://github.com/llvm/llvm-project/pull/102776
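
For context on the byref/byval distinction raised here: in clang's ABI
classification, the LLVM byref attribute is what ABIArgInfo::getIndirectAliased()
produces, and it is how the AMDGPU ABI already passes kernel aggregates. The
sketch below only illustrates that pattern applied to the SPIR-V target; the
classifyKernelArgumentType name, the choice of the OpenCL constant address
space, and the fall-through to classifyArgumentType are assumptions made for
the example, not code taken from this PR.

// Illustrative sketch only, written against clang's CodeGen internals
// (Targets/SPIR.cpp context), mirroring AMDGPUABIInfo's kernel-argument handling.
ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Pass the aggregate indirectly with the LLVM byref attribute. Unlike
    // byval, byref does not imply a callee-local copy or stack passing,
    // which fits a kernel entry point that has no real caller; a later
    // SPIR-V translation step could map this onto a byval-style parameter
    // decoration if that is how the target defines one.
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_constant));
  }
  return classifyArgumentType(Ty);
}

Whether the SPIR-V side can round-trip such a byref into its own byval-like
attribute is exactly the open question above.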
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits