koops updated this revision to Diff 557397.
koops added a comment.
Adding extra test cases to loop_bind_messages.cpp.
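For reviewers, a minimal sketch of the nesting rule the new test cases exercise (function and array names below are illustrative, not taken from the test file): 'loop bind(parallel)' is mapped to a worksharing construct, so closely nesting it inside another worksharing region such as 'parallel for' is diagnosed, while nesting it inside a plain 'parallel' region remains valid.

  #define N 64
  int a[N];

  void valid_nesting() {
    #pragma omp parallel
    for (int i = 0; i < N; i++) {
      #pragma omp loop bind(parallel)   // OK: closely nested in a 'parallel' region
      for (int j = 0; j < N; j++)
        a[j] = i + j;
    }
  }

  void invalid_nesting() {
    #pragma omp parallel for
    for (int i = 0; i < N; i++) {
      #pragma omp loop bind(parallel)   // error: prohibited nesting inside 'parallel for'
      for (int j = 0; j < N; j++)
        a[j] = i + j;
    }
  }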
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D158266/new/
https://reviews.llvm.org/D158266
Files:
clang/include/clang/Sema/Sema.h
clang/lib/Sema/SemaOpenMP.cpp
clang/test/OpenMP/loop_bind_messages.cpp
clang/test/PCH/pragma-loop.cpp
Index: clang/test/PCH/pragma-loop.cpp
===================================================================
--- clang/test/PCH/pragma-loop.cpp
+++ clang/test/PCH/pragma-loop.cpp
@@ -116,9 +116,13 @@
inline void run10(int *List, int Length) {
int i = 0;
-#pragma omp loop bind(teams)
+ int j = 0;
+ #pragma omp teams
for (int i = 0; i < Length; i++) {
- List[i] = i;
+ #pragma omp loop bind(teams)
+ for (int j = 0; j < Length; j++) {
+ List[i] = i+j;
+ }
}
}
Index: clang/test/OpenMP/loop_bind_messages.cpp
===================================================================
--- clang/test/OpenMP/loop_bind_messages.cpp
+++ clang/test/OpenMP/loop_bind_messages.cpp
@@ -4,6 +4,7 @@
#define NNN 50
int aaa[NNN];
+int aaa2[NNN][NNN];
void parallel_loop() {
#pragma omp parallel
@@ -15,6 +16,91 @@
}
}
+void parallel_for_AND_loop_bind() {
+ #pragma omp parallel for
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+}
+
+void parallel_nowait() {
+ #pragma omp parallel
+ #pragma omp for nowait
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+}
+
+void parallel_for_with_nothing() {
+ #pragma omp parallel for
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp nothing
+ #pragma omp loop // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+}
+
+void parallel_targetfor_with_loop_bind() {
+ #pragma omp target teams distribute parallel for
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'target teams distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+}
+
+void parallel_targetparallel_with_loop() {
+ #pragma omp target parallel
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel)
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+}
+
+void loop_bind_AND_loop_bind() {
+ #pragma omp parallel for
+ for (int i = 0; i < 100; ++i) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'loop' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[j] = j*NNN;
+ }
+ }
+ }
+}
+
+void parallel_with_sections_loop() {
+ #pragma omp parallel
+ {
+ #pragma omp sections
+ {
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'sections' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+
+ #pragma omp section
+ {
+ aaa[NNN-1] = NNN;
+ }
+ }
+ }
+}
+
void teams_loop() {
int var1, var2;
@@ -34,17 +120,23 @@
}
}
-void orphan_loop_with_bind() {
- #pragma omp loop bind(parallel)
- for (int j = 0 ; j < NNN ; j++) {
- aaa[j] = j*NNN;
+void teams_targetteams_with_loop() {
+ #pragma omp target teams
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(teams)
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
}
}
-void orphan_loop_no_bind() {
- #pragma omp loop // expected-error{{expected 'bind' clause for 'loop' construct without an enclosing OpenMP construct}}
- for (int j = 0 ; j < NNN ; j++) {
- aaa[j] = j*NNN;
+void teams_targetfor_with_loop_bind() {
+ #pragma omp target teams distribute parallel for
+ for (int i = 0 ; i < NNN ; i++) {
+ #pragma omp loop bind(teams) // expected-error{{region cannot be closely nested inside 'target teams distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa2[i][j] = i+j;
+ }
}
}
@@ -65,12 +157,80 @@
}
}
+void teams_loop_distribute() {
+ int total = 0;
+
+ #pragma omp teams num_teams(8) thread_limit(256)
+ #pragma omp distribute parallel for dist_schedule(static, 1024) \
+ schedule(static, 64)
+ for (int i = 0; i < NNN; i++) {
+ #pragma omp loop bind(teams) // expected-error{{'distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
+ for (int j = 0; j < NNN; j++) {
+ aaa2[i][j] = i+j;
+ }
+ }
+}
+
+void parallel_for_with_loop_teams_bind() {
+ #pragma omp parallel for
+ for (int i = 0; i < NNN; i++) {
+ #pragma omp loop bind(teams) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[i] = i+i*NNN;
+ }
+ }
+}
+
+void teams_with_loop_thread_bind() {
+ #pragma omp teams
+ for (int i = 0; i < NNN; i++) {
+ #pragma omp loop bind(thread)
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[i] = i+i*NNN;
+ }
+ }
+}
+
+void orphan_loop_no_bind() {
+ #pragma omp loop // expected-error{{expected 'bind' clause for 'loop' construct without an enclosing OpenMP construct}}
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[j] = j*NNN;
+ }
+}
+
+void orphan_loop_parallel_bind() {
+ #pragma omp loop bind(parallel)
+ for (int j = 0 ; j < NNN ; j++) {
+ aaa[j] = j*NNN;
+ }
+}
+
+void orphan_loop_teams_bind() {
+ #pragma omp loop bind(teams)
+ for (int i = 0; i < NNN; i++) {
+ aaa[i] = i+i*NNN;
+ }
+}
+
int main(int argc, char *argv[]) {
parallel_loop();
+ parallel_for_AND_loop_bind();
+ parallel_nowait();
+ parallel_for_with_nothing();
+ parallel_targetfor_with_loop_bind();
+ parallel_targetparallel_with_loop();
+ loop_bind_AND_loop_bind();
+ parallel_with_sections_loop();
teams_loop();
- orphan_loop_with_bind();
- orphan_loop_no_bind();
+ teams_targetteams_with_loop();
+ teams_targetfor_with_loop_bind();
teams_loop_reduction();
+ teams_loop_distribute();
+ parallel_for_with_loop_teams_bind();
+ teams_with_loop_thread_bind();
+ orphan_loop_no_bind();
+ orphan_loop_parallel_bind();
+ orphan_loop_teams_bind();
}
#endif
Index: clang/lib/Sema/SemaOpenMP.cpp
===================================================================
--- clang/lib/Sema/SemaOpenMP.cpp
+++ clang/lib/Sema/SemaOpenMP.cpp
@@ -6116,33 +6116,36 @@
ArrayRef<OMPClause *> Clauses,
OpenMPBindClauseKind BindKind,
OpenMPDirectiveKind &Kind,
- OpenMPDirectiveKind &PrevMappedDirective) {
+ OpenMPDirectiveKind &PrevMappedDirective,
+ SourceLocation StartLoc, SourceLocation EndLoc) {
bool UseClausesWithoutBind = false;
// Restricting to "#pragma omp loop bind"
if (getLangOpts().OpenMP >= 50 && Kind == OMPD_loop) {
+
+ const OpenMPDirectiveKind ParentDirective = DSAStack->getParentDirective();
+
if (BindKind == OMPC_BIND_unknown) {
// Setting the enclosing teams or parallel construct for the loop
// directive without bind clause.
BindKind = OMPC_BIND_thread; // Default bind(thread) if binding is unknown
- const OpenMPDirectiveKind ParentDirective =
- DSAStack->getParentDirective();
if (ParentDirective == OMPD_unknown) {
Diag(DSAStack->getDefaultDSALocation(),
diag::err_omp_bind_required_on_loop);
- } else if (ParentDirective == OMPD_parallel ||
- ParentDirective == OMPD_target_parallel) {
+ } else if (isOpenMPParallelDirective(ParentDirective) &&
+ !isOpenMPTeamsDirective(ParentDirective)) {
BindKind = OMPC_BIND_parallel;
- } else if (ParentDirective == OMPD_teams ||
- ParentDirective == OMPD_target_teams) {
+ } else if (isOpenMPNestingTeamsDirective(ParentDirective) ||
+ (ParentDirective == OMPD_target_teams)) {
BindKind = OMPC_BIND_teams;
}
} else {
- // bind clause is present, so we should set flag indicating to only
- // use the clauses that aren't the bind clause for the new directive that
- // loop is lowered to.
+ // A bind clause is present on the loop directive. When the loop directive
+ // is mapped to a new directive, the bind clause is not reused, so set a
+ // flag indicating that only the clauses other than the bind clause should
+ // be used for the new directive.
UseClausesWithoutBind = true;
}
@@ -6161,12 +6164,23 @@
switch (BindKind) {
case OMPC_BIND_parallel:
+ if (isOpenMPWorksharingDirective(ParentDirective) ||
+ ParentDirective == OMPD_loop) {
+ Diag(StartLoc, diag::err_omp_prohibited_region)
+ << true << getOpenMPDirectiveName(ParentDirective) << 1
+ << getOpenMPDirectiveName(Kind);
+ }
Kind = OMPD_for;
DSAStack->setCurrentDirective(OMPD_for);
DSAStack->setMappedDirective(OMPD_loop);
PrevMappedDirective = OMPD_loop;
break;
case OMPC_BIND_teams:
+ if (isOpenMPWorksharingDirective(ParentDirective)) {
+ Diag(StartLoc, diag::err_omp_prohibited_region)
+ << true << getOpenMPDirectiveName(ParentDirective) << 1
+ << getOpenMPDirectiveName(Kind);
+ }
Kind = OMPD_distribute;
DSAStack->setCurrentDirective(OMPD_distribute);
DSAStack->setMappedDirective(OMPD_loop);
@@ -6206,6 +6220,7 @@
if (const OMPBindClause *BC =
OMPExecutableDirective::getSingleClause<OMPBindClause>(Clauses))
BindKind = BC->getBindKind();
+
// First check CancelRegion which is then used in checkNestingOfRegions.
if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
@@ -6220,8 +6235,9 @@
llvm::SmallVector<OMPClause *> ClausesWithoutBind;
bool UseClausesWithoutBind = false;
- UseClausesWithoutBind = mapLoopConstruct(ClausesWithoutBind, Clauses,
- BindKind, Kind, PrevMappedDirective);
+ UseClausesWithoutBind =
+ mapLoopConstruct(ClausesWithoutBind, Clauses, BindKind, Kind,
+ PrevMappedDirective, StartLoc, EndLoc);
llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
VarsWithInheritedDSAType VarsWithInheritedDSA;
Index: clang/include/clang/Sema/Sema.h
===================================================================
--- clang/include/clang/Sema/Sema.h
+++ clang/include/clang/Sema/Sema.h
@@ -11236,6 +11236,7 @@
/// on the parameter of the bind clause. In the methods for the
/// mapped directives, check the parameters of the lastprivate clause.
bool checkLastPrivateForMappedDirectives(ArrayRef<OMPClause *> Clauses);
+
/// Depending on the bind clause of OMPD_loop map the directive to new
/// directives.
/// 1) loop bind(parallel) --> OMPD_for
@@ -11247,7 +11248,8 @@
ArrayRef<OMPClause *> Clauses,
OpenMPBindClauseKind BindKind,
OpenMPDirectiveKind &Kind,
- OpenMPDirectiveKind &PrevMappedDirective);
+ OpenMPDirectiveKind &PrevMappedDirective,
+ SourceLocation StartLoc, SourceLocation EndLoc);
public:
/// The declarator \p D defines a function in the scope \p S which is nested
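As a side note on the mapLoopConstruct declaration above, here is a hedged sketch (illustrative code, not part of the patch) of the two bind-clause mappings visible in this diff: bind(parallel) makes the 'loop' directive behave like 'for' (OMPD_for), and bind(teams) makes it behave like 'distribute' (OMPD_distribute).

  void mapping_sketch(int *buf, int n) {
    #pragma omp parallel
    #pragma omp loop bind(parallel)   // treated as '#pragma omp for'
    for (int i = 0; i < n; i++)
      buf[i] = i;

    #pragma omp teams
    #pragma omp loop bind(teams)      // treated as '#pragma omp distribute'
    for (int i = 0; i < n; i++)
      buf[i] = 2 * i;
  }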