This revision was landed with ongoing or failed builds.
This revision was automatically updated to reflect the committed changes.
Closed by commit rG9c2cfaaada66: [Clang][OpenMP] Allow `f16` literal suffix when compiling OpenMP target… (authored by tianshilei1992).
Repository:
rG LLVM Github Monorepo
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D142075/new/
https://reviews.llvm.org/D142075
Files:
clang/lib/Lex/LiteralSupport.cpp
clang/test/OpenMP/float16_sema.cpp
Index: clang/test/OpenMP/float16_sema.cpp
===================================================================
--- /dev/null
+++ clang/test/OpenMP/float16_sema.cpp
@@ -0,0 +1,10 @@
+// RUN: %clang_cc1 -fsyntax-only -x c++ -triple x86_64-unknown-linux-gnu -fopenmp -fopenmp-targets=nvptx64 -verify %s
+// expected-no-diagnostics
+
+int foo() {
+#pragma omp target
+ {
+ __fp16 a = -1.0f16;
+ }
+ return 0;
+}
Index: clang/lib/Lex/LiteralSupport.cpp
===================================================================
--- clang/lib/Lex/LiteralSupport.cpp
+++ clang/lib/Lex/LiteralSupport.cpp
@@ -943,9 +943,13 @@
// CUDA host and device may have different _Float16 support, therefore
// allows f16 literals to avoid false alarm.
+ // When we compile for OpenMP target offloading on NVPTX, f16 suffix
+ // should also be supported.
// ToDo: more precise check for CUDA.
- if ((Target.hasFloat16Type() || LangOpts.CUDA) && s + 2 < ThisTokEnd &&
- s[1] == '1' && s[2] == '6') {
+ // TODO: AMDGPU might also support it in the future.
+ if ((Target.hasFloat16Type() || LangOpts.CUDA ||
+ (LangOpts.OpenMPIsDevice && Target.getTriple().isNVPTX())) &&
+ s + 2 < ThisTokEnd && s[1] == '1' && s[2] == '6') {
s += 2; // success, eat up 2 characters.
isFloat16 = true;
continue;
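
For context, here is a minimal standalone sketch (not part of the patch; the file name and driver flags are illustrative) of the kind of source this change lets the NVPTX device compilation accept:

// f16_literal.cpp -- hypothetical example; assumes a working NVPTX offload toolchain.
// Illustrative driver invocation:
//   clang++ -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda f16_literal.cpp
int main() {
#pragma omp target
  {
    __fp16 h = -1.0f16; // the f16 suffix was previously rejected in the device pass
    (void)h;
  }
  return 0;
}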