Hi!

Constructs with the order(concurrent) clause have some extra restrictions;
this patch diagnoses those that are easy to diagnose.  A rough sketch of
what is now rejected follows.
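
The sketch below is condensed from the new order-3.c/order-4.c testcases
rather than copied verbatim (the foo wrapper and the comments are just for
illustration); each marked statement is now diagnosed:

  int omp_get_thread_num (void);
  int t;
  #pragma omp threadprivate(t)

  void
  foo (int *a)
  {
    int i;
    #pragma omp for order(concurrent)
    for (i = 0; i < 64; i++)
      {
        #pragma omp critical            /* construct not allowed in the region */
        a[i]++;
      }
    #pragma omp simd order(concurrent)
    for (i = 0; i < 64; i++)
      a[i] += omp_get_thread_num ();    /* OpenMP runtime API call */
    #pragma omp for order(concurrent)
    for (i = 0; i < 64; i++)
      t++;                              /* threadprivate variable use */
  }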

Bootstrapped/regtested on x86_64-linux and i686-linux, committed to trunk.

2019-07-13  Jakub Jelinek  <ja...@redhat.com>

        * gimplify.c (struct gimplify_omp_ctx): Add order_concurrent member.
        (omp_notice_threadprivate_variable): Diagnose threadprivate variable
        uses inside of order(concurrent) constructs.
        (gimplify_scan_omp_clauses): Set ctx->order_concurrent if
        OMP_CLAUSE_ORDER is seen.
        * omp-low.c (struct omp_context): Add order_concurrent member.
        (scan_sharing_clauses): Set ctx->order_concurrent if
        OMP_CLAUSE_ORDER is seen.
        (check_omp_nesting_restrictions): Diagnose ordered or atomic inside
        of simd order(concurrent).  Diagnose constructs not allowed inside of
        for order(concurrent).
        (setjmp_or_longjmp_p): Add a context and TREE_PUBLIC check to avoid
        complaining about e.g. static double setjmp (double);, class static
        methods or setjmps in non-global namespaces.
        (omp_runtime_api_call): New function.
        (scan_omp_1_stmt): Diagnose OpenMP runtime API calls inside of
        order(concurrent) loops.

        * c-c++-common/gomp/order-3.c: New test.
        * c-c++-common/gomp/order-4.c: New test.
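
To illustrate the setjmp_or_longjmp_p change: declarations like the
following (made up for this mail, they are not part of the patch or the
testsuite) are no longer mistaken for the C library setjmp inside simd
regions, because they are either not TREE_PUBLIC or their DECL_CONTEXT is
not the translation unit:

  static double setjmp (double);           /* file-static, so !TREE_PUBLIC */
  struct S { static int setjmp (int); };   /* class-scope method (C++) */
  namespace N { int setjmp (int); }        /* non-global namespace (C++) */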

--- gcc/gimplify.c.jj   2019-07-12 09:46:35.784602797 +0200
+++ gcc/gimplify.c      2019-07-12 11:00:21.200038178 +0200
@@ -222,6 +222,7 @@ struct gimplify_omp_ctx
   bool distribute;
   bool target_firstprivatize_array_bases;
   bool add_safelen1;
+  bool order_concurrent;
   int defaultmap[4];
 };
 
@@ -7025,14 +7026,24 @@ omp_notice_threadprivate_variable (struc
   struct gimplify_omp_ctx *octx;
 
   for (octx = ctx; octx; octx = octx->outer_context)
-    if ((octx->region_type & ORT_TARGET) != 0)
+    if ((octx->region_type & ORT_TARGET) != 0
+       || octx->order_concurrent)
       {
        n = splay_tree_lookup (octx->variables, (splay_tree_key)decl);
        if (n == NULL)
          {
-           error ("threadprivate variable %qE used in target region",
-                  DECL_NAME (decl));
-           error_at (octx->location, "enclosing target region");
+           if (octx->order_concurrent)
+             {
+               error ("threadprivate variable %qE used in a region with"
+                      " %<order(concurrent)%> clause", DECL_NAME (decl));
+               error_at (octx->location, "enclosing region");
+             }
+           else
+             {
+               error ("threadprivate variable %qE used in target region",
+                      DECL_NAME (decl));
+               error_at (octx->location, "enclosing target region");
+             }
            splay_tree_insert (octx->variables, (splay_tree_key)decl, 0);
          }
        if (decl2)
@@ -9263,11 +9274,14 @@ gimplify_scan_omp_clauses (tree *list_p,
        case OMP_CLAUSE_NOGROUP:
        case OMP_CLAUSE_THREADS:
        case OMP_CLAUSE_SIMD:
-       case OMP_CLAUSE_ORDER:
        case OMP_CLAUSE_IF_PRESENT:
        case OMP_CLAUSE_FINALIZE:
          break;
 
+       case OMP_CLAUSE_ORDER:
+         ctx->order_concurrent = true;
+         break;
+
        case OMP_CLAUSE_DEFAULTMAP:
          enum gimplify_defaultmap_kind gdmkmin, gdmkmax;
          switch (OMP_CLAUSE_DEFAULTMAP_CATEGORY (c))
--- gcc/omp-low.c.jj    2019-07-12 09:46:35.786602766 +0200
+++ gcc/omp-low.c       2019-07-12 13:15:44.566151747 +0200
@@ -150,6 +150,9 @@ struct omp_context
 
   /* True in the second simd loop of for simd with inscan reductions.  */
   bool for_simd_scan_phase;
+
+  /* True if there is order(concurrent) clause on the construct.  */
+  bool order_concurrent;
 };
 
 static splay_tree all_contexts;
@@ -1390,6 +1393,10 @@ scan_sharing_clauses (tree clauses, omp_
            }
          break;
 
+       case OMP_CLAUSE_ORDER:
+         ctx->order_concurrent = true;
+         break;
+
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
@@ -1402,7 +1409,6 @@ scan_sharing_clauses (tree clauses, omp_
        case OMP_CLAUSE_SIMD:
        case OMP_CLAUSE_NOGROUP:
        case OMP_CLAUSE_DEFAULTMAP:
-       case OMP_CLAUSE_ORDER:
        case OMP_CLAUSE_ASYNC:
        case OMP_CLAUSE_WAIT:
        case OMP_CLAUSE_GANG:
@@ -2669,9 +2675,20 @@ check_omp_nesting_restrictions (gimple *
          && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_FOR)
        ctx = ctx->outer;
       if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
-         && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
+         && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
        {
          c = NULL_TREE;
+         if (ctx->order_concurrent
+             && (gimple_code (stmt) == GIMPLE_OMP_ORDERED
+                 || gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
+                 || gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE))
+           {
+             error_at (gimple_location (stmt),
+                       "OpenMP constructs other than %<parallel%> or"
+                       " %<simd%> may not be nested inside a region with"
+                       " the %<order(concurrent)%> clause");
+             return false;
+           }
          if (gimple_code (stmt) == GIMPLE_OMP_ORDERED)
            {
              c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
@@ -2717,6 +2734,18 @@ check_omp_nesting_restrictions (gimple *
              return false;
            }
        }
+      else if (ctx->order_concurrent
+              && gimple_code (stmt) != GIMPLE_OMP_PARALLEL
+              && (gimple_code (stmt) != GIMPLE_OMP_FOR
+                  || gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_SIMD)
+              && gimple_code (stmt) != GIMPLE_OMP_SCAN)
+       {
+         error_at (gimple_location (stmt),
+                   "OpenMP constructs other than %<parallel%> or"
+                   " %<simd%> may not be nested inside a region with"
+                   " the %<order(concurrent)%> clause");
+         return false;
+       }
     }
   switch (gimple_code (stmt))
     {
@@ -3323,12 +3352,123 @@ setjmp_or_longjmp_p (const_tree fndecl)
     return true;
 
   tree declname = DECL_NAME (fndecl);
-  if (!declname)
+  if (!declname
+      || (DECL_CONTEXT (fndecl) != NULL_TREE
+          && TREE_CODE (DECL_CONTEXT (fndecl)) != TRANSLATION_UNIT_DECL)
+      || !TREE_PUBLIC (fndecl))
     return false;
+
   const char *name = IDENTIFIER_POINTER (declname);
   return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
 }
 
+/* Return true if FNDECL is an omp_* runtime API call.  */
+
+static bool
+omp_runtime_api_call (const_tree fndecl)
+{
+  tree declname = DECL_NAME (fndecl);
+  if (!declname
+      || (DECL_CONTEXT (fndecl) != NULL_TREE
+          && TREE_CODE (DECL_CONTEXT (fndecl)) != TRANSLATION_UNIT_DECL)
+      || !TREE_PUBLIC (fndecl))
+    return false;
+
+  const char *name = IDENTIFIER_POINTER (declname);
+  if (strncmp (name, "omp_", 4) != 0)
+    return false;
+
+  static const char *omp_runtime_apis[] =
+    {
+      /* This array has 3 sections.  First omp_* calls that don't
+        have any suffixes.  */
+      "target_alloc",
+      "target_associate_ptr",
+      "target_disassociate_ptr",
+      "target_free",
+      "target_is_present",
+      "target_memcpy",
+      "target_memcpy_rect",
+      NULL,
+      /* Now omp_* calls that are available as omp_* and omp_*_.  */
+      "capture_affinity",
+      "destroy_lock",
+      "destroy_nest_lock",
+      "display_affinity",
+      "get_active_level",
+      "get_affinity_format",
+      "get_cancellation",
+      "get_default_device",
+      "get_dynamic",
+      "get_initial_device",
+      "get_level",
+      "get_max_active_levels",
+      "get_max_task_priority",
+      "get_max_threads",
+      "get_nested",
+      "get_num_devices",
+      "get_num_places",
+      "get_num_procs",
+      "get_num_teams",
+      "get_num_threads",
+      "get_partition_num_places",
+      "get_place_num",
+      "get_proc_bind",
+      "get_team_num",
+      "get_thread_limit",
+      "get_thread_num",
+      "get_wtick",
+      "get_wtime",
+      "in_final",
+      "in_parallel",
+      "init_lock",
+      "init_nest_lock",
+      "is_initial_device",
+      "pause_resource",
+      "pause_resource_all",
+      "set_affinity_format",
+      "set_lock",
+      "set_nest_lock",
+      "test_lock",
+      "test_nest_lock",
+      "unset_lock",
+      "unset_nest_lock",
+      NULL,
+      /* And finally calls available as omp_*, omp_*_ and omp_*_8_.  */
+      "get_ancestor_thread_num",
+      "get_partition_place_nums",
+      "get_place_num_procs",
+      "get_place_proc_ids",
+      "get_schedule",
+      "get_team_size",
+      "set_default_device",
+      "set_dynamic",
+      "set_max_active_levels",
+      "set_nested",
+      "set_num_threads",
+      "set_schedule"
+    };
+
+  int mode = 0;
+  for (unsigned i = 0; i < ARRAY_SIZE (omp_runtime_apis); i++)
+    {
+      if (omp_runtime_apis[i] == NULL)
+       {
+         mode++;
+         continue;
+       }
+      size_t len = strlen (omp_runtime_apis[i]);
+      if (strncmp (name + 4, omp_runtime_apis[i], len) == 0
+         && (name[4 + len] == '\0'
+             || (mode > 0
+                 && name[4 + len] == '_'
+                 && (name[4 + len + 1] == '\0'
+                     || (mode > 1
+                         && strcmp (name + 4 + len + 1, "8_") == 0)))))
+       return true;
+    }
+  return false;
+}
 
 /* Helper function for scan_omp.
 
@@ -3354,10 +3494,10 @@ scan_omp_1_stmt (gimple_stmt_iterator *g
       tree fndecl = gimple_call_fndecl (stmt);
       if (fndecl)
        {
-         if (setjmp_or_longjmp_p (fndecl)
-             && ctx
+         if (ctx
              && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
-             && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
+             && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD
+             && setjmp_or_longjmp_p (fndecl))
            {
              remove = true;
              error_at (gimple_location (stmt),
@@ -3378,6 +3518,19 @@ scan_omp_1_stmt (gimple_stmt_iterator *g
              default:
                break;
              }
+         else if (ctx)
+           {
+             omp_context *octx = ctx;
+             if (gimple_code (ctx->stmt) == GIMPLE_OMP_SCAN && ctx->outer)
+               octx = ctx->outer;
+             if (octx->order_concurrent && omp_runtime_api_call (fndecl))
+               {
+                 remove = true;
+                 error_at (gimple_location (stmt),
+                           "OpenMP runtime API call %qD in a region with "
+                           "%<order(concurrent)%> clause", fndecl);
+               }
+           }
        }
     }
   if (remove)
--- gcc/testsuite/c-c++-common/gomp/order-3.c.jj        2019-07-12 13:26:48.035042309 +0200
+++ gcc/testsuite/c-c++-common/gomp/order-3.c   2019-07-12 13:58:37.847954296 +0200
@@ -0,0 +1,212 @@
+void foo (void);
+int v;
+#ifdef __cplusplus
+extern "C" {
+#endif
+int omp_get_thread_num (void);
+int omp_get_num_threads (void);
+int omp_target_is_present (const void *, int);
+int omp_get_cancellation (void);
+#ifdef __cplusplus
+}
+#endif
+
+void
+f1 (int *a)
+{
+  int i;
+  #pragma omp simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp parallel             /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp simd                 /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+      for (j = 0; j < 64; j++)
+       a[64 * i + j] = i + j;
+    }
+  #pragma omp simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp critical             /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp ordered simd         /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic               /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      v++;
+    }
+  #pragma omp simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic read
+      a[i] = v;                                /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+    }
+  #pragma omp simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic write         /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      v = a[i];                                /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_thread_num ();     /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_num_threads ();    /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_target_is_present (a + i, 0);  /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_cancellation ();   /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+}
+
+void
+f2 (int *a)
+{
+  int i;
+  #pragma omp for simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp parallel             /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp for simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp simd                 /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+      for (j = 0; j < 64; j++)
+       a[64 * i + j] = i + j;
+    }
+  #pragma omp for simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp critical             /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+      foo ();
+    }
+  #pragma omp for simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp ordered simd         /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp for simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic               /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      v++;
+    }
+  #pragma omp for simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic read
+      a[i] = v;                                /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+    }
+  #pragma omp for simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic write         /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      v = a[i];                                /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp for simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_thread_num ();     /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_num_threads ();    /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_target_is_present (a + i, 0);  /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for simd order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_cancellation ();   /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+}
+
+void
+f3 (int *a)
+{
+  int i;
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp parallel
+      foo ();
+    }
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp simd
+      for (j = 0; j < 64; j++)
+       a[64 * i + j] = i + j;
+    }
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp critical             /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp ordered simd         /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      foo ();
+    }
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic               /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      v++;
+    }
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic read
+      a[i] = v;                                /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+    }
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp atomic write         /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+      v = a[i];                                /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+    }
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      #pragma omp task                 /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      a[i]++;
+    }
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    {
+      int j;
+      #pragma omp taskloop             /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+      for (j = 0; j < 64; j++)
+       a[64 * i + j] = i + j;
+    }
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_thread_num ();     /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_num_threads ();    /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_target_is_present (a + i, 0);  /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+  #pragma omp for order(concurrent)
+  for (i = 0; i < 64; i++)
+    a[i] += omp_get_cancellation ();   /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+}
--- gcc/testsuite/c-c++-common/gomp/order-4.c.jj        2019-07-12 14:00:19.357408353 +0200
+++ gcc/testsuite/c-c++-common/gomp/order-4.c   2019-07-12 14:02:49.067128347 +0200
@@ -0,0 +1,29 @@
+int t;
+#pragma omp threadprivate(t)
+
+void
+f1 (void)
+{
+  int i;
+  #pragma omp simd order(concurrent)   /* { dg-error "enclosing region" } */
+  for (i = 0; i < 64; i++)
+    t++;       /* { dg-error "threadprivate variable 't' used in a region with 'order\\(concurrent\\)' clause" } */
+}
+
+void
+f2 (void)
+{
+  int i;
+  #pragma omp for simd order(concurrent)       /* { dg-error "enclosing region" } */
+  for (i = 0; i < 64; i++)                     /* { dg-error "enclosing region" "" { target c++ } } */
+    t++;       /* { dg-error "threadprivate variable 't' used in a region with 'order\\(concurrent\\)' clause" } */
+}
+
+void
+f3 (void)
+{
+  int i;
+  #pragma omp for order(concurrent)    /* { dg-error "enclosing region" } */
+  for (i = 0; i < 64; i++)
+    t++;       /* { dg-error "threadprivate variable 't' used in a region with 'order\\(concurrent\\)' clause" } */
+}

        Jakub
