Hi! This patch adds support for four new combined constructs (#pragma omp target parallel, #pragma omp target parallel for, #pragma omp target parallel for simd and #pragma omp target simd) and fixes various issues in the clause splitting code, as well as other problems I found while writing the new testcases.
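For reference, a minimal sketch of the four newly accepted forms, modelled on the shapes exercised by the new c-c++-common/gomp/clauses-1.c test below (the function name, the pointer parameter q and the loop bodies are just placeholders for illustration, not part of the patch):

  void
  sketch (int *q)
  {
    /* Combined target + parallel region.  */
    #pragma omp target parallel
      ;
    /* Combined target + parallel + worksharing loop.  */
    #pragma omp target parallel for
    for (int i = 0; i < 64; i++)
      q[i]++;
    /* As above, additionally vectorized.  */
    #pragma omp target parallel for simd
    for (int i = 0; i < 64; i++)
      q[i]++;
    /* Combined target + simd loop.  */
    #pragma omp target simd
    for (int i = 0; i < 64; i++)
      q[i]++;
  }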
2015-07-16 Jakub Jelinek <ja...@redhat.com> gcc/ * omp-low.c (expand_omp_build_assign): Add prototype. Add AFTER argument, if true emit statements after *GSI_P and continue linking. (expand_parallel_call): Use expand_omp_build_assign. gcc/c-family/ * c-omp.c (c_omp_split_clauses): Handle new 4 combined constructs. Handle OMP_CLAUSE_DEFAULTMAP. Document 2 missing combined constructs. Fix up OMP_CLAUSE_FIRSTPRIVATE handling on #pragma omp distribute simd. Fix up shared/default handling. Add ENABLE_CHECKING verification. gcc/c/ * c-parser.c (c_parser_omp_parallel): Allow parsing #pragma omp target parallel. (c_parser_omp_target): Allow parsing #pragma omp target simd and #pragma omp target parallel{, for{, simd}}. gcc/cp/ * parser.c (cp_parser_omp_clause_priority): Fix typo. (cp_parser_omp_parallel): Allow parsing #pragma omp target parallel. (cp_parser_omp_target): Allow parsing #pragma omp target simd and #pragma omp target parallel{, for{, simd}}. gcc/testsuite/ * c-c++-common/gomp/clauses-1.c: New test. libgomp/ * testsuite/libgomp.c/for-2.h (OMPTGT, OMPTO, OMPFROM): Define if not already defined. (N(f0), N(f1), N(f2), N(f3), N(f4), N(f5), N(f6), N(f7), N(f8), N(f9), N(f10), N(f11), N(f12), N(f13), N(f14)): Use OMPTGT macro. (N(test)): Use OMPTO and OMPFROM macros. * testsuite/libgomp.c/for-5.c: New test. * testsuite/libgomp.c/for-6.c: New test. * testsuite/libgomp.c++/for-13.C: New test. * testsuite/libgomp.c++/for-14.C: New test. --- gcc/omp-low.c.jj 2015-07-15 13:00:32.000000000 +0200 +++ gcc/omp-low.c 2015-07-16 16:28:36.429055322 +0200 @@ -5637,6 +5637,8 @@ gimple_build_cond_empty (tree cond) return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE); } +static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree, + bool = false); /* Build the function calls to GOMP_parallel_start etc to actually generate the parallel operation. REGION is the parallel region @@ -5754,13 +5756,12 @@ expand_parallel_call (struct omp_region gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (then_bb); - stmt = gimple_build_assign (tmp_then, val); - gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); + expand_omp_build_assign (&gsi, tmp_then, val, true); gsi = gsi_start_bb (else_bb); - stmt = gimple_build_assign - (tmp_else, build_int_cst (unsigned_type_node, 1)); - gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); + expand_omp_build_assign (&gsi, tmp_else, + build_int_cst (unsigned_type_node, 1), + true); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE); @@ -6221,16 +6222,21 @@ expand_omp_regimplify_p (tree *tp, int * return NULL_TREE; } -/* Prepend TO = FROM assignment before *GSI_P. */ +/* Prepend or append TO = FROM assignment before or after *GSI_P. */ static void -expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from) +expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from, + bool after) { bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to); from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE, - true, GSI_SAME_STMT); + !after, after ? 
GSI_CONTINUE_LINKING + : GSI_SAME_STMT); gimple stmt = gimple_build_assign (to, from); - gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT); + if (after) + gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING); + else + gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT); if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL) || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL)) { --- gcc/c-family/c-omp.c.jj 2015-07-15 13:02:31.000000000 +0200 +++ gcc/c-family/c-omp.c 2015-07-16 15:11:12.725828803 +0200 @@ -684,28 +684,34 @@ c_finish_omp_for (location_t locus, enum } } -/* Right now we have 15 different combined/composite constructs, this +/* Right now we have 21 different combined/composite constructs, this function attempts to split or duplicate clauses for combined constructs. CODE is the innermost construct in the combined construct, and MASK allows to determine which constructs are combined together, as every construct has at least one clause that no other construct has (except for OMP_SECTIONS, but that can be only combined with parallel). - Combined constructs are: - #pragma omp parallel for - #pragma omp parallel sections - #pragma omp parallel for simd - #pragma omp for simd - #pragma omp distribute simd + Combined/composite constructs are: #pragma omp distribute parallel for #pragma omp distribute parallel for simd - #pragma omp teams distribute - #pragma omp teams distribute parallel for - #pragma omp teams distribute parallel for simd + #pragma omp distribute simd + #pragma omp for simd + #pragma omp parallel for + #pragma omp parallel for simd + #pragma omp parallel sections + #pragma omp target parallel + #pragma omp target parallel for + #pragma omp target parallel for simd #pragma omp target teams #pragma omp target teams distribute #pragma omp target teams distribute parallel for #pragma omp target teams distribute parallel for simd - #pragma omp taskloop simd */ + #pragma omp target teams distribute simd + #pragma omp target simd + #pragma omp taskloop simd + #pragma omp teams distribute + #pragma omp teams distribute parallel for + #pragma omp teams distribute parallel for simd + #pragma omp teams distribute simd */ void c_omp_split_clauses (location_t loc, enum tree_code code, @@ -745,6 +751,7 @@ c_omp_split_clauses (location_t loc, enu case OMP_CLAUSE_DEVICE: case OMP_CLAUSE_MAP: case OMP_CLAUSE_IS_DEVICE_PTR: + case OMP_CLAUSE_DEFAULTMAP: s = C_OMP_CLAUSE_SPLIT_TARGET; break; case OMP_CLAUSE_NUM_TEAMS: @@ -786,12 +793,25 @@ c_omp_split_clauses (location_t loc, enu case OMP_CLAUSE_COLLAPSE: if (code == OMP_SIMD) { - c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), - OMP_CLAUSE_COLLAPSE); - OMP_CLAUSE_COLLAPSE_EXPR (c) - = OMP_CLAUSE_COLLAPSE_EXPR (clauses); - OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; - cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; + if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) + | (OMP_CLAUSE_MASK_1 + << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE) + | (OMP_CLAUSE_MASK_1 + << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0) + { + c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), + OMP_CLAUSE_COLLAPSE); + OMP_CLAUSE_COLLAPSE_EXPR (c) + = OMP_CLAUSE_COLLAPSE_EXPR (clauses); + OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; + cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; + } + else + { + /* This must be #pragma omp target simd */ + s = C_OMP_CLAUSE_SPLIT_SIMD; + break; + } } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { @@ -837,6 +857,16 @@ c_omp_split_clauses (location_t loc, enu if ((mask & (OMP_CLAUSE_MASK_1 << 
PRAGMA_OMP_CLAUSE_MAP)) != 0) { + if (code == OMP_SIMD + && (mask & ((OMP_CLAUSE_MASK_1 + << PRAGMA_OMP_CLAUSE_NUM_THREADS) + | (OMP_CLAUSE_MASK_1 + << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0) + { + /* This must be #pragma omp target simd. */ + s = C_OMP_CLAUSE_SPLIT_TARGET; + break; + } c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); @@ -863,7 +893,9 @@ c_omp_split_clauses (location_t loc, enu } else /* This must be - #pragma omp parallel{, for{, simd}, sections}. */ + #pragma omp parallel{, for{, simd}, sections} + or + #pragma omp target parallel. */ s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) @@ -883,7 +915,7 @@ c_omp_split_clauses (location_t loc, enu { /* This must be #pragma omp distribute simd. */ gcc_assert (code == OMP_SIMD); - s = C_OMP_CLAUSE_SPLIT_TEAMS; + s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) @@ -932,11 +964,6 @@ c_omp_split_clauses (location_t loc, enu taskloop. */ case OMP_CLAUSE_SHARED: case OMP_CLAUSE_DEFAULT: - if (code == OMP_TEAMS) - { - s = C_OMP_CLAUSE_SPLIT_TEAMS; - break; - } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { @@ -946,6 +973,12 @@ c_omp_split_clauses (location_t loc, enu if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { + if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) + == 0) + { + s = C_OMP_CLAUSE_SPLIT_TEAMS; + break; + } c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_CODE (clauses)); if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED) @@ -955,7 +988,6 @@ c_omp_split_clauses (location_t loc, enu = OMP_CLAUSE_DEFAULT_KIND (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c; - } s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; @@ -963,22 +995,22 @@ c_omp_split_clauses (location_t loc, enu Duplicate it on all of them, but omit on for or sections if parallel is present. 
*/ case OMP_CLAUSE_REDUCTION: - if (code == OMP_SIMD) - { - c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), - OMP_CLAUSE_REDUCTION); - OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); - OMP_CLAUSE_REDUCTION_CODE (c) - = OMP_CLAUSE_REDUCTION_CODE (clauses); - OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) - = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); - OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) - = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); - OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; - cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; - } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { + if (code == OMP_SIMD) + { + c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), + OMP_CLAUSE_REDUCTION); + OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); + OMP_CLAUSE_REDUCTION_CODE (c) + = OMP_CLAUSE_REDUCTION_CODE (clauses); + OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) + = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); + OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) + = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); + OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; + cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; + } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { @@ -1001,8 +1033,10 @@ c_omp_split_clauses (location_t loc, enu else s = C_OMP_CLAUSE_SPLIT_FOR; } - else if (code == OMP_SECTIONS) + else if (code == OMP_SECTIONS || code == OMP_PARALLEL) s = C_OMP_CLAUSE_SPLIT_PARALLEL; + else if (code == OMP_SIMD) + s = C_OMP_CLAUSE_SPLIT_SIMD; else s = C_OMP_CLAUSE_SPLIT_TEAMS; break; @@ -1060,6 +1094,22 @@ c_omp_split_clauses (location_t loc, enu OMP_CLAUSE_CHAIN (clauses) = cclauses[s]; cclauses[s] = clauses; } +#ifdef ENABLE_CHECKING + if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0) + gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE); + if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0) + gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE); + if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0) + gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE); + if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0) + gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE); + if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) + | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0 + && code != OMP_SECTIONS) + gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE); + if (code != OMP_SIMD) + gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE); +#endif } --- gcc/c/c-parser.c.jj 2015-07-15 13:02:31.000000000 +0200 +++ gcc/c/c-parser.c 2015-07-16 11:08:22.806359460 +0200 @@ -14261,7 +14261,11 @@ c_parser_omp_parallel (location_t loc, c OMP_PARALLEL_COMBINED (stmt) = 1; return stmt; } - else if (cclauses) + /* When combined with distribute, parallel has to be followed by for. + #pragma omp target parallel is allowed though. 
*/ + else if (cclauses + && (mask & (OMP_CLAUSE_MASK_1 + << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { error_at (loc, "expected %<for%> after %qs", p_name); c_parser_skip_to_pragma_eol (parser); @@ -14272,7 +14276,7 @@ c_parser_omp_parallel (location_t loc, c c_parser_skip_to_pragma_eol (parser, false); return NULL_TREE; } - else if (c_parser_next_token_is (parser, CPP_NAME)) + else if (cclauses == NULL && c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "sections") == 0) @@ -14293,6 +14297,11 @@ c_parser_omp_parallel (location_t loc, c } clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); + if (cclauses) + { + omp_split_clauses (loc, OMP_PARALLEL, mask, clauses, cclauses); + clauses = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; + } block = c_begin_omp_parallel (); c_parser_statement (parser); @@ -14928,8 +14937,15 @@ c_parser_omp_target (c_parser *parser, e if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); + enum tree_code ccode = ERROR_MARK; if (strcmp (p, "teams") == 0) + ccode = OMP_TEAMS; + else if (strcmp (p, "parallel") == 0) + ccode = OMP_PARALLEL; + else if (strcmp (p, "simd") == 0) + ccode = OMP_SIMD; + if (ccode != ERROR_MARK) { tree cclauses[C_OMP_CLAUSE_SPLIT_COUNT]; char p_name[sizeof ("#pragma omp target teams distribute " @@ -14939,15 +14955,48 @@ c_parser_omp_target (c_parser *parser, e strcpy (p_name, "#pragma omp target"); if (!flag_openmp) /* flag_openmp_simd */ { - tree stmt = c_parser_omp_teams (loc, parser, p_name, - OMP_TARGET_CLAUSE_MASK, - cclauses); + tree stmt; + switch (ccode) + { + case OMP_TEAMS: + stmt = c_parser_omp_teams (loc, parser, p_name, + OMP_TARGET_CLAUSE_MASK, + cclauses); + break; + case OMP_PARALLEL: + stmt = c_parser_omp_parallel (loc, parser, p_name, + OMP_TARGET_CLAUSE_MASK, + cclauses); + break; + case OMP_SIMD: + stmt = c_parser_omp_simd (loc, parser, p_name, + OMP_TARGET_CLAUSE_MASK, + cclauses); + break; + default: + gcc_unreachable (); + } return stmt != NULL_TREE; } keep_next_level (); - tree block = c_begin_compound_stmt (true); - tree ret = c_parser_omp_teams (loc, parser, p_name, - OMP_TARGET_CLAUSE_MASK, cclauses); + tree block = c_begin_compound_stmt (true), ret; + switch (ccode) + { + case OMP_TEAMS: + ret = c_parser_omp_teams (loc, parser, p_name, + OMP_TARGET_CLAUSE_MASK, cclauses); + break; + case OMP_PARALLEL: + ret = c_parser_omp_parallel (loc, parser, p_name, + OMP_TARGET_CLAUSE_MASK, cclauses); + break; + case OMP_SIMD: + ret = c_parser_omp_simd (loc, parser, p_name, + OMP_TARGET_CLAUSE_MASK, cclauses); + break; + default: + gcc_unreachable (); + } block = c_end_compound_stmt (loc, block, true); if (ret == NULL_TREE) return false; --- gcc/cp/parser.c.jj 2015-07-15 16:41:48.000000000 +0200 +++ gcc/cp/parser.c 2015-07-16 16:32:28.008345373 +0200 @@ -28737,7 +28737,7 @@ cp_parser_omp_clause_priority (cp_parser /*or_comma=*/false, /*consume_paren=*/true); - check_no_duplicate_clause (list, OMP_CLAUSE_GRAINSIZE, + check_no_duplicate_clause (list, OMP_CLAUSE_PRIORITY, "priority", location); c = build_omp_clause (location, OMP_CLAUSE_PRIORITY); @@ -31867,7 +31867,11 @@ cp_parser_omp_parallel (cp_parser *parse OMP_PARALLEL_COMBINED (stmt) = 1; return stmt; } - else if (cclauses) + /* When combined with distribute, parallel has to be followed by for. + #pragma omp target parallel is allowed though. 
*/ + else if (cclauses + && (mask & (OMP_CLAUSE_MASK_1 + << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { error_at (loc, "expected %<for%> after %qs", p_name); cp_parser_skip_to_pragma_eol (parser, pragma_tok); @@ -31878,7 +31882,7 @@ cp_parser_omp_parallel (cp_parser *parse cp_parser_skip_to_pragma_eol (parser, pragma_tok); return NULL_TREE; } - else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) + else if (cclauses == NULL && cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); @@ -31900,6 +31904,11 @@ cp_parser_omp_parallel (cp_parser *parse } clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok); + if (cclauses) + { + cp_omp_split_clauses (loc, OMP_PARALLEL, mask, clauses, cclauses); + clauses = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; + } block = begin_omp_parallel (); save = cp_parser_begin_omp_structured_block (parser); @@ -32551,8 +32560,15 @@ cp_parser_omp_target (cp_parser *parser, { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); + enum tree_code ccode = ERROR_MARK; if (strcmp (p, "teams") == 0) + ccode = OMP_TEAMS; + else if (strcmp (p, "parallel") == 0) + ccode = OMP_PARALLEL; + else if (strcmp (p, "simd") == 0) + ccode = OMP_SIMD; + if (ccode != ERROR_MARK) { tree cclauses[C_OMP_CLAUSE_SPLIT_COUNT]; char p_name[sizeof ("#pragma omp target teams distribute " @@ -32562,16 +32578,49 @@ cp_parser_omp_target (cp_parser *parser, strcpy (p_name, "#pragma omp target"); if (!flag_openmp) /* flag_openmp_simd */ { - tree stmt = cp_parser_omp_teams (parser, pragma_tok, p_name, - OMP_TARGET_CLAUSE_MASK, - cclauses); + tree stmt; + switch (ccode) + { + case OMP_TEAMS: + stmt = cp_parser_omp_teams (parser, pragma_tok, p_name, + OMP_TARGET_CLAUSE_MASK, + cclauses); + break; + case OMP_PARALLEL: + stmt = cp_parser_omp_parallel (parser, pragma_tok, p_name, + OMP_TARGET_CLAUSE_MASK, + cclauses); + break; + case OMP_SIMD: + stmt = cp_parser_omp_simd (parser, pragma_tok, p_name, + OMP_TARGET_CLAUSE_MASK, + cclauses); + break; + default: + gcc_unreachable (); + } return stmt != NULL_TREE; } keep_next_level (true); - tree sb = begin_omp_structured_block (); + tree sb = begin_omp_structured_block (), ret; unsigned save = cp_parser_begin_omp_structured_block (parser); - tree ret = cp_parser_omp_teams (parser, pragma_tok, p_name, - OMP_TARGET_CLAUSE_MASK, cclauses); + switch (ccode) + { + case OMP_TEAMS: + ret = cp_parser_omp_teams (parser, pragma_tok, p_name, + OMP_TARGET_CLAUSE_MASK, cclauses); + break; + case OMP_PARALLEL: + ret = cp_parser_omp_parallel (parser, pragma_tok, p_name, + OMP_TARGET_CLAUSE_MASK, cclauses); + break; + case OMP_SIMD: + ret = cp_parser_omp_simd (parser, pragma_tok, p_name, + OMP_TARGET_CLAUSE_MASK, cclauses); + break; + default: + gcc_unreachable (); + } cp_parser_end_omp_structured_block (parser, save); tree body = finish_omp_structured_block (sb); if (ret == NULL_TREE) --- gcc/testsuite/c-c++-common/gomp/clauses-1.c.jj 2015-07-16 15:19:39.645945737 +0200 +++ gcc/testsuite/c-c++-common/gomp/clauses-1.c 2015-07-16 15:20:31.786566176 +0200 @@ -0,0 +1,162 @@ +/* { dg-do compile } */ +/* { dg-options "-fopenmp" } */ +/* { dg-additional-options "-std=c99" { target c } } */ + +int t; +#pragma omp threadprivate (t) + +#pragma omp declare target +int f, l, ll, r; + +void +foo (int d, int m, int i1, int i2, int p, int *idp, int s, + int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q) +{ + #pragma omp 
distribute parallel for \ + private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \ + if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \ + lastprivate (l) linear (ll:1) ordered schedule(static, 4) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp distribute parallel for simd \ + private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \ + if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \ + lastprivate (l) linear (ll:1) schedule(static, 4) \ + safelen(8) simdlen(4) aligned(q: 32) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp distribute simd \ + private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \ + safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r) + for (int i = 0; i < 64; i++) + ll++; +} +#pragma omp end declare target + +void +bar (int d, int m, int i1, int i2, int p, int *idp, int s, + int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q) +{ + #pragma omp for simd \ + private (p) firstprivate (f) lastprivate (l) linear (ll:1) reduction(+:r) schedule(static, 4) collapse(1) nowait \ + safelen(8) simdlen(4) aligned(q: 32) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp parallel for \ + private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \ + lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp parallel for simd \ + private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \ + lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \ + safelen(8) simdlen(4) aligned(q: 32) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp parallel sections \ + private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \ + lastprivate (l) + { + #pragma omp section + {} + #pragma omp section + {} + } + #pragma omp target parallel \ + device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \ + if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) + ; + #pragma omp target parallel for \ + device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \ + if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \ + lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp target parallel for simd \ + device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \ + if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \ + lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \ + safelen(8) simdlen(4) aligned(q: 32) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp target teams \ + device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \ + shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) + ; + #pragma omp target teams distribute \ + device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \ + shared(s) default(shared) 
reduction(+:r) num_teams(nte) thread_limit(tl) \ + collapse(1) dist_schedule(static, 16) + for (int i = 0; i < 64; i++) + ; + #pragma omp target teams distribute parallel for \ + device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \ + shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \ + collapse(1) dist_schedule(static, 16) \ + if (parallel: i2) num_threads (nth) proc_bind(spread) \ + lastprivate (l) linear (ll:1) ordered schedule(static, 4) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp target teams distribute parallel for simd \ + device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \ + shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \ + collapse(1) dist_schedule(static, 16) \ + if (parallel: i2) num_threads (nth) proc_bind(spread) \ + lastprivate (l) linear (ll:1) schedule(static, 4) \ + safelen(8) simdlen(4) aligned(q: 32) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp target teams distribute simd \ + device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \ + shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \ + collapse(1) dist_schedule(static, 16) \ + safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp target simd \ + device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \ + safelen(8) simdlen(4) lastprivate (l) linear(ll: 1) aligned(q: 32) reduction(+:r) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp taskloop simd \ + private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable nogroup priority (pp) \ + safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp taskwait + #pragma omp taskloop simd \ + private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) if(taskloop: i1) final(fi) priority (pp) \ + safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp target + #pragma omp teams distribute \ + private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \ + collapse(1) dist_schedule(static, 16) + for (int i = 0; i < 64; i++) + ; + #pragma omp target + #pragma omp teams distribute parallel for \ + private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \ + collapse(1) dist_schedule(static, 16) \ + if (parallel: i2) num_threads (nth) proc_bind(spread) \ + lastprivate (l) linear (ll:1) ordered schedule(static, 4) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp target + #pragma omp teams distribute parallel for simd \ + private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \ + collapse(1) dist_schedule(static, 16) \ + if (parallel: i2) num_threads (nth) proc_bind(spread) \ + lastprivate (l) linear (ll:1) schedule(static, 4) \ + safelen(8) simdlen(4) aligned(q: 32) + for (int i = 0; i < 64; i++) + ll++; + #pragma omp target + #pragma omp teams distribute simd \ + private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \ + collapse(1) dist_schedule(static, 16) \ + 
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) + for (int i = 0; i < 64; i++) + ll++; +} --- libgomp/testsuite/libgomp.c/for-2.h.jj 2015-04-24 12:30:40.000000000 +0200 +++ libgomp/testsuite/libgomp.c/for-2.h 2015-07-16 17:10:49.765625686 +0200 @@ -11,11 +11,21 @@ noreturn (void) #ifndef SC #define SC #endif +#ifndef OMPTGT +#define OMPTGT +#endif +#ifndef OMPTO +#define OMPTO(v) do {} while (0) +#endif +#ifndef OMPFROM +#define OMPFROM(v) do {} while (0) +#endif __attribute__((noinline, noclone)) void N(f0) (void) { int i; + OMPTGT #pragma omp F S for (i = 0; i < 1500; i++) a[i] += 2; @@ -24,6 +34,7 @@ N(f0) (void) __attribute__((noinline, noclone)) void N(f1) (void) { + OMPTGT #pragma omp F S for (unsigned int i = __INT_MAX__; i < 3000U + __INT_MAX__; i += 2) a[(i - __INT_MAX__) >> 1] -= 2; @@ -33,6 +44,7 @@ __attribute__((noinline, noclone)) void N(f2) (void) { unsigned long long i; + OMPTGT #pragma omp F S for (i = __LONG_LONG_MAX__ + 4500ULL - 27; i > __LONG_LONG_MAX__ - 27ULL; i -= 3) @@ -42,6 +54,7 @@ N(f2) (void) __attribute__((noinline, noclone)) void N(f3) (long long n1, long long n2, long long s3) { + OMPTGT #pragma omp F S for (long long i = n1 + 23; i > n2 - 25; i -= s3) a[i + 48] += 7; @@ -51,6 +64,7 @@ __attribute__((noinline, noclone)) void N(f4) (void) { unsigned int i; + OMPTGT #pragma omp F S for (i = 30; i < 20; i += 2) a[i] += 10; @@ -61,6 +75,7 @@ N(f5) (int n11, int n12, int n21, int n2 int s1, int s2, int s3) { SC int v1, v2, v3; + OMPTGT #pragma omp F S collapse(3) for (v1 = n11; v1 < n12; v1 += s1) for (v2 = n21; v2 < n22; v2 += s2) @@ -74,6 +89,7 @@ N(f6) (int n11, int n12, int n21, int n2 { SC int v1, v2; SC long long v3; + OMPTGT #pragma omp F S collapse(3) for (v1 = n11; v1 > n12; v1 += s1) for (v2 = n21; v2 > n22; v2 += s2) @@ -86,6 +102,7 @@ N(f7) (void) { SC unsigned int v1, v3; SC unsigned long long v2; + OMPTGT #pragma omp F S collapse(3) for (v1 = 0; v1 < 20; v1 += 2) for (v2 = __LONG_LONG_MAX__ + 16ULL; @@ -98,6 +115,7 @@ __attribute__((noinline, noclone)) void N(f8) (void) { SC long long v1, v2, v3; + OMPTGT #pragma omp F S collapse(3) for (v1 = 0; v1 < 20; v1 += 2) for (v2 = 30; v2 < 20; v2++) @@ -109,6 +127,7 @@ __attribute__((noinline, noclone)) void N(f9) (void) { int i; + OMPTGT #pragma omp F S for (i = 20; i < 10; i++) { @@ -122,6 +141,7 @@ __attribute__((noinline, noclone)) void N(f10) (void) { SC int i; + OMPTGT #pragma omp F S collapse(3) for (i = 0; i < 10; i++) for (int j = 10; j < 8; j++) @@ -137,6 +157,7 @@ __attribute__((noinline, noclone)) void N(f11) (int n) { int i; + OMPTGT #pragma omp F S for (i = 20; i < n; i++) { @@ -150,6 +171,7 @@ __attribute__((noinline, noclone)) void N(f12) (int n) { SC int i; + OMPTGT #pragma omp F S collapse(3) for (i = 0; i < 10; i++) for (int j = n; j < 8; j++) @@ -165,6 +187,7 @@ __attribute__((noinline, noclone)) void N(f13) (void) { int *i; + OMPTGT #pragma omp F S for (i = a; i < &a[1500]; i++) i[0] += 2; @@ -174,6 +197,7 @@ __attribute__((noinline, noclone)) void N(f14) (void) { SC float *i; + OMPTGT #pragma omp F S collapse(3) for (i = &b[0][0][0]; i < &b[0][0][10]; i++) for (float *j = &b[0][15][0]; j > &b[0][0][0]; j -= 10) @@ -188,27 +212,34 @@ N(test) (void) int i, j, k; for (i = 0; i < 1500; i++) a[i] = i - 25; + OMPTO (a); N(f0) (); + OMPFROM (a); for (i = 0; i < 1500; i++) if (a[i] != i - 23) return 1; N(f1) (); + OMPFROM (a); for (i = 0; i < 1500; i++) if (a[i] != i - 25) return 1; N(f2) (); + OMPFROM (a); for (i = 0; i < 1500; i++) if (a[i] != i - 29) return 1; N(f3) (1500LL - 1 - 23 - 
48, -1LL + 25 - 48, 1LL); + OMPFROM (a); for (i = 0; i < 1500; i++) if (a[i] != i - 22) return 1; N(f3) (1500LL - 1 - 23 - 48, 1500LL - 1, 7LL); + OMPFROM (a); for (i = 0; i < 1500; i++) if (a[i] != i - 22) return 1; N(f4) (); + OMPFROM (a); for (i = 0; i < 1500; i++) if (a[i] != i - 22) return 1; @@ -216,31 +247,37 @@ N(test) (void) for (j = 0; j < 15; j++) for (k = 0; k < 10; k++) b[i][j][k] = i - 2.5 + 1.5 * j - 1.5 * k; + OMPTO (b); N(f5) (0, 10, 0, 15, 0, 10, 1, 1, 1); + OMPFROM (b); for (i = 0; i < 10; i++) for (j = 0; j < 15; j++) for (k = 0; k < 10; k++) if (b[i][j][k] != i + 1.5 * j - 1.5 * k) return 1; N(f5) (0, 10, 30, 15, 0, 10, 4, 5, 6); + OMPFROM (b); for (i = 0; i < 10; i++) for (j = 0; j < 15; j++) for (k = 0; k < 10; k++) if (b[i][j][k] != i + 1.5 * j - 1.5 * k) return 1; N(f6) (9, -1, 29, 0, 9, -1, -1, -2, -1); + OMPFROM (b); for (i = 0; i < 10; i++) for (j = 0; j < 15; j++) for (k = 0; k < 10; k++) if (b[i][j][k] != i - 4.5 + 1.5 * j - 1.5 * k) return 1; N(f7) (); + OMPFROM (b); for (i = 0; i < 10; i++) for (j = 0; j < 15; j++) for (k = 0; k < 10; k++) if (b[i][j][k] != i + 1.0 + 1.5 * j - 1.5 * k) return 1; N(f8) (); + OMPFROM (b); for (i = 0; i < 10; i++) for (j = 0; j < 15; j++) for (k = 0; k < 10; k++) @@ -250,6 +287,8 @@ N(test) (void) N(f10) (); N(f11) (10); N(f12) (12); + OMPFROM (a); + OMPFROM (b); for (i = 0; i < 1500; i++) if (a[i] != i - 22) return 1; @@ -260,6 +299,8 @@ N(test) (void) return 1; N(f13) (); N(f14) (); + OMPFROM (a); + OMPFROM (b); for (i = 0; i < 1500; i++) if (a[i] != i - 20) return 1; --- libgomp/testsuite/libgomp.c/for-5.c.jj 2015-07-16 13:16:59.718048044 +0200 +++ libgomp/testsuite/libgomp.c/for-5.c 2015-07-16 17:32:58.233776535 +0200 @@ -0,0 +1,154 @@ +/* { dg-additional-options "-std=gnu99" } */ + +extern void abort (); + +#define M(x, y, z) O(x, y, z) +#define O(x, y, z) x ## _ ## y ## _ ## z + +#pragma omp declare target + +#define F for +#define G f +#define S +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#pragma omp end declare target + +#undef OMPFROM +#undef OMPTO +#define DO_PRAGMA(x) _Pragma (#x) +#define OMPFROM(v) DO_PRAGMA (omp target update from(v)) +#define OMPTO(v) DO_PRAGMA (omp target update to(v)) + +#define F target parallel for +#define G tpf +#include "for-1.h" +#undef F +#undef G + +#define F target simd +#define G t_simd +#define S +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F target parallel for simd +#define G tpf_simd +#include "for-1.h" +#undef F +#undef G + +#define F target teams distribute +#define G ttd +#define S +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F target teams distribute +#define G ttd_ds128 +#define S dist_schedule(static, 128) +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F target teams distribute simd +#define G ttds +#define S +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F target teams distribute simd +#define G ttds_ds128 +#define S dist_schedule(static, 128) +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F target teams distribute parallel for +#define G ttdpf +#include "for-1.h" +#undef F +#undef G + +#define F target teams distribute parallel for dist_schedule(static, 128) +#define G ttdpf_ds128 +#include "for-1.h" +#undef F +#undef G + +#define F target teams distribute 
parallel for simd +#define G ttdpfs +#include "for-1.h" +#undef F +#undef G + +#define F target teams distribute parallel for simd dist_schedule(static, 128) +#define G ttdpfs_ds128 +#include "for-1.h" +#undef F +#undef G + +int +main () +{ + if (test_tpf_static () + || test_tpf_static32 () + || test_tpf_auto () + || test_tpf_guided32 () + || test_tpf_runtime () + || test_t_simd_normal () + || test_tpf_simd_static () + || test_tpf_simd_static32 () + || test_tpf_simd_auto () + || test_tpf_simd_guided32 () + || test_tpf_simd_runtime () + || test_ttd_normal () + || test_ttd_ds128_normal () + || test_ttds_normal () + || test_ttds_ds128_normal () + || test_ttdpf_static () + || test_ttdpf_static32 () + || test_ttdpf_auto () + || test_ttdpf_guided32 () + || test_ttdpf_runtime () + || test_ttdpf_ds128_static () + || test_ttdpf_ds128_static32 () + || test_ttdpf_ds128_auto () + || test_ttdpf_ds128_guided32 () + || test_ttdpf_ds128_runtime () + || test_ttdpfs_static () + || test_ttdpfs_static32 () + || test_ttdpfs_auto () + || test_ttdpfs_guided32 () + || test_ttdpfs_runtime () + || test_ttdpfs_ds128_static () + || test_ttdpfs_ds128_static32 () + || test_ttdpfs_ds128_auto () + || test_ttdpfs_ds128_guided32 () + || test_ttdpfs_ds128_runtime ()) + abort (); + return 0; +} --- libgomp/testsuite/libgomp.c/for-6.c.jj 2015-07-16 13:37:27.272537773 +0200 +++ libgomp/testsuite/libgomp.c/for-6.c 2015-07-16 17:32:18.320792414 +0200 @@ -0,0 +1,123 @@ +/* { dg-additional-options "-std=gnu99" } */ + +extern void abort (); + +#define M(x, y, z) O(x, y, z) +#define O(x, y, z) x ## _ ## y ## _ ## z + +#pragma omp declare target + +#define F for +#define G f +#define S +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#pragma omp end declare target + +#undef OMPTGT +#undef OMPFROM +#undef OMPTO +#define DO_PRAGMA(x) _Pragma (#x) +#define OMPTGT DO_PRAGMA (omp target) +#define OMPFROM(v) DO_PRAGMA (omp target update from(v)) +#define OMPTO(v) DO_PRAGMA (omp target update to(v)) + +#define F teams distribute +#define G td +#define S +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F teams distribute +#define G td_ds128 +#define S dist_schedule(static, 128) +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F teams distribute simd +#define G tds +#define S +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F teams distribute simd +#define G tds_ds128 +#define S dist_schedule(static, 128) +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F teams distribute parallel for +#define G tdpf +#include "for-1.h" +#undef F +#undef G + +#define F teams distribute parallel for dist_schedule(static, 128) +#define G tdpf_ds128 +#include "for-1.h" +#undef F +#undef G + +#define F teams distribute parallel for simd +#define G tdpfs +#include "for-1.h" +#undef F +#undef G + +#define F teams distribute parallel for simd dist_schedule(static, 128) +#define G tdpfs_ds128 +#include "for-1.h" +#undef F +#undef G + +int +main () +{ + if (test_td_normal () + || test_td_ds128_normal () + || test_tds_normal () + || test_tds_ds128_normal () + || test_tdpf_static () + || test_tdpf_static32 () + || test_tdpf_auto () + || test_tdpf_guided32 () + || test_tdpf_runtime () + || test_tdpf_ds128_static () + || test_tdpf_ds128_static32 () + || test_tdpf_ds128_auto () + || test_tdpf_ds128_guided32 () + || 
test_tdpf_ds128_runtime () + || test_tdpfs_static () + || test_tdpfs_static32 () + || test_tdpfs_auto () + || test_tdpfs_guided32 () + || test_tdpfs_runtime () + || test_tdpfs_ds128_static () + || test_tdpfs_ds128_static32 () + || test_tdpfs_ds128_auto () + || test_tdpfs_ds128_guided32 () + || test_tdpfs_ds128_runtime ()) + abort (); + return 0; +} --- libgomp/testsuite/libgomp.c++/for-13.C.jj 2015-07-16 13:52:48.408268151 +0200 +++ libgomp/testsuite/libgomp.c++/for-13.C 2015-07-16 17:33:15.780125013 +0200 @@ -0,0 +1,151 @@ +extern "C" void abort (); + +#define M(x, y, z) O(x, y, z) +#define O(x, y, z) x ## _ ## y ## _ ## z + +#pragma omp declare target + +#define F for +#define G f +#define S +#define N(x) M(x, G, normal) +#include "../libgomp.c/for-2.h" +#undef S +#undef N +#undef F +#undef G + +#pragma omp end declare target + +#undef OMPFROM +#undef OMPTO +#define DO_PRAGMA(x) _Pragma (#x) +#define OMPFROM(v) DO_PRAGMA (omp target update from(v)) +#define OMPTO(v) DO_PRAGMA (omp target update to(v)) + +#define F target parallel for +#define G tpf +#include "../libgomp.c/for-1.h" +#undef F +#undef G + +#define F target simd +#define G t_simd +#define S +#define N(x) M(x, G, normal) +#include "../libgomp.c/for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F target parallel for simd +#define G tpf_simd +#include "../libgomp.c/for-1.h" +#undef F +#undef G + +#define F target teams distribute +#define G ttd +#define S +#define N(x) M(x, G, normal) +#include "../libgomp.c/for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F target teams distribute +#define G ttd_ds128 +#define S dist_schedule(static, 128) +#define N(x) M(x, G, normal) +#include "../libgomp.c/for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F target teams distribute simd +#define G ttds +#define S +#define N(x) M(x, G, normal) +#include "../libgomp.c/for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F target teams distribute simd +#define G ttds_ds128 +#define S dist_schedule(static, 128) +#define N(x) M(x, G, normal) +#include "../libgomp.c/for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F target teams distribute parallel for +#define G ttdpf +#include "../libgomp.c/for-1.h" +#undef F +#undef G + +#define F target teams distribute parallel for dist_schedule(static, 128) +#define G ttdpf_ds128 +#include "../libgomp.c/for-1.h" +#undef F +#undef G + +#define F target teams distribute parallel for simd +#define G ttdpfs +#include "../libgomp.c/for-1.h" +#undef F +#undef G + +#define F target teams distribute parallel for simd dist_schedule(static, 128) +#define G ttdpfs_ds128 +#include "../libgomp.c/for-1.h" +#undef F +#undef G + +int +main () +{ + if (test_tpf_static () + || test_tpf_static32 () + || test_tpf_auto () + || test_tpf_guided32 () + || test_tpf_runtime () + || test_t_simd_normal () + || test_tpf_simd_static () + || test_tpf_simd_static32 () + || test_tpf_simd_auto () + || test_tpf_simd_guided32 () + || test_tpf_simd_runtime () + || test_ttd_normal () + || test_ttd_ds128_normal () + || test_ttds_normal () + || test_ttds_ds128_normal () + || test_ttdpf_static () + || test_ttdpf_static32 () + || test_ttdpf_auto () + || test_ttdpf_guided32 () + || test_ttdpf_runtime () + || test_ttdpf_ds128_static () + || test_ttdpf_ds128_static32 () + || test_ttdpf_ds128_auto () + || test_ttdpf_ds128_guided32 () + || test_ttdpf_ds128_runtime () + || test_ttdpfs_static () + || test_ttdpfs_static32 () + || test_ttdpfs_auto () + || test_ttdpfs_guided32 () + || test_ttdpfs_runtime () + || 
test_ttdpfs_ds128_static () + || test_ttdpfs_ds128_static32 () + || test_ttdpfs_ds128_auto () + || test_ttdpfs_ds128_guided32 () + || test_ttdpfs_ds128_runtime ()) + abort (); +} --- libgomp/testsuite/libgomp.c++/for-14.C.jj 2015-07-16 13:52:51.078158343 +0200 +++ libgomp/testsuite/libgomp.c++/for-14.C 2015-07-16 17:33:23.504831156 +0200 @@ -0,0 +1,120 @@ +extern "C" void abort (); + +#define M(x, y, z) O(x, y, z) +#define O(x, y, z) x ## _ ## y ## _ ## z + +#pragma omp declare target + +#define F for +#define G f +#define S +#define N(x) M(x, G, normal) +#include "../libgomp.c/for-2.h" +#undef S +#undef N +#undef F +#undef G + +#pragma omp end declare target + +#undef OMPTGT +#undef OMPFROM +#undef OMPTO +#define DO_PRAGMA(x) _Pragma (#x) +#define OMPTGT DO_PRAGMA (omp target) +#define OMPFROM(v) DO_PRAGMA (omp target update from(v)) +#define OMPTO(v) DO_PRAGMA (omp target update to(v)) + +#define F teams distribute +#define G td +#define S +#define N(x) M(x, G, normal) +#include "../libgomp.c/for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F teams distribute +#define G td_ds128 +#define S dist_schedule(static, 128) +#define N(x) M(x, G, normal) +#include "../libgomp.c/for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F teams distribute simd +#define G tds +#define S +#define N(x) M(x, G, normal) +#include "../libgomp.c/for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F teams distribute simd +#define G tds_ds128 +#define S dist_schedule(static, 128) +#define N(x) M(x, G, normal) +#include "../libgomp.c/for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F teams distribute parallel for +#define G tdpf +#include "../libgomp.c/for-1.h" +#undef F +#undef G + +#define F teams distribute parallel for dist_schedule(static, 128) +#define G tdpf_ds128 +#include "../libgomp.c/for-1.h" +#undef F +#undef G + +#define F teams distribute parallel for simd +#define G tdpfs +#include "../libgomp.c/for-1.h" +#undef F +#undef G + +#define F teams distribute parallel for simd dist_schedule(static, 128) +#define G tdpfs_ds128 +#include "../libgomp.c/for-1.h" +#undef F +#undef G + +int +main () +{ + if (test_td_normal () + || test_td_ds128_normal () + || test_tds_normal () + || test_tds_ds128_normal () + || test_tdpf_static () + || test_tdpf_static32 () + || test_tdpf_auto () + || test_tdpf_guided32 () + || test_tdpf_runtime () + || test_tdpf_ds128_static () + || test_tdpf_ds128_static32 () + || test_tdpf_ds128_auto () + || test_tdpf_ds128_guided32 () + || test_tdpf_ds128_runtime () + || test_tdpfs_static () + || test_tdpfs_static32 () + || test_tdpfs_auto () + || test_tdpfs_guided32 () + || test_tdpfs_runtime () + || test_tdpfs_ds128_static () + || test_tdpfs_ds128_static32 () + || test_tdpfs_ds128_auto () + || test_tdpfs_ds128_guided32 () + || test_tdpfs_ds128_runtime ()) + abort (); +} Jakub