From: Kwok Cheung Yeung <kcye...@baylibre.com>

libgomp/

        * testsuite/libgomp.c++/target-std__array-concurrent.C: New.
        * testsuite/libgomp.c++/target-std__bitset-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__deque-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__flat_map-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__flat_multimap-concurrent.C: 
Likewise.
        * testsuite/libgomp.c++/target-std__flat_multiset-concurrent.C: 
Likewise.
        * testsuite/libgomp.c++/target-std__flat_set-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__forward_list-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__list-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__map-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__multimap-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__multiset-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__set-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__span-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__unordered_map-concurrent.C: 
Likewise.
        * testsuite/libgomp.c++/target-std__unordered_multimap-concurrent.C: 
Likewise.
        * testsuite/libgomp.c++/target-std__unordered_multiset-concurrent.C: 
Likewise.
        * testsuite/libgomp.c++/target-std__unordered_set-concurrent.C: 
Likewise.
        * testsuite/libgomp.c++/target-std__valarray-concurrent.C: Likewise.
        * testsuite/libgomp.c++/target-std__vector-concurrent.C: Likewise.

Co-authored-by: Thomas Schwinge <tschwi...@baylibre.com>
---
 .../target-std__array-concurrent.C            | 60 ++++++++++++++
 .../target-std__bitset-concurrent.C           | 67 +++++++++++++++
 .../target-std__deque-concurrent.C            | 62 ++++++++++++++
 .../target-std__flat_map-concurrent.C         | 71 ++++++++++++++++
 .../target-std__flat_multimap-concurrent.C    | 70 ++++++++++++++++
 .../target-std__flat_multiset-concurrent.C    | 60 ++++++++++++++
 .../target-std__flat_set-concurrent.C         | 67 +++++++++++++++
 .../target-std__forward_list-concurrent.C     | 81 +++++++++++++++++++
 .../libgomp.c++/target-std__list-concurrent.C | 81 +++++++++++++++++++
 .../libgomp.c++/target-std__map-concurrent.C  | 66 +++++++++++++++
 .../target-std__multimap-concurrent.C         | 64 +++++++++++++++
 .../target-std__multiset-concurrent.C         | 60 ++++++++++++++
 .../libgomp.c++/target-std__set-concurrent.C  | 66 +++++++++++++++
 .../libgomp.c++/target-std__span-concurrent.C | 62 ++++++++++++++
 .../target-std__unordered_map-concurrent.C    | 66 +++++++++++++++
 ...arget-std__unordered_multimap-concurrent.C | 65 +++++++++++++++
 ...arget-std__unordered_multiset-concurrent.C | 59 ++++++++++++++
 .../target-std__unordered_set-concurrent.C    | 66 +++++++++++++++
 .../target-std__valarray-concurrent.C         | 64 +++++++++++++++
 .../target-std__vector-concurrent.C           | 61 ++++++++++++++
 20 files changed, 1318 insertions(+)
 create mode 100644 libgomp/testsuite/libgomp.c++/target-std__array-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__bitset-concurrent.C
 create mode 100644 libgomp/testsuite/libgomp.c++/target-std__deque-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__flat_map-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__flat_multimap-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__flat_multiset-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__flat_set-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__forward_list-concurrent.C
 create mode 100644 libgomp/testsuite/libgomp.c++/target-std__list-concurrent.C
 create mode 100644 libgomp/testsuite/libgomp.c++/target-std__map-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__multimap-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__multiset-concurrent.C
 create mode 100644 libgomp/testsuite/libgomp.c++/target-std__set-concurrent.C
 create mode 100644 libgomp/testsuite/libgomp.c++/target-std__span-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__unordered_map-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__unordered_multimap-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__unordered_multiset-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__unordered_set-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__valarray-concurrent.C
 create mode 100644 
libgomp/testsuite/libgomp.c++/target-std__vector-concurrent.C

diff --git a/libgomp/testsuite/libgomp.c++/target-std__array-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__array-concurrent.C
new file mode 100644
index 00000000000..e97bfe60a61
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__array-concurrent.C
@@ -0,0 +1,60 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <array>
+#include <algorithm>
+
+#define N 50000
+
+void init (int data[])
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = rand ();
+}
+
+#pragma omp declare target
+bool validate (const std::array<int,N> &arr, int data[])
+{
+  for (int i = 0; i < N; ++i)
+    if (arr[i] != data[i] * data[i])
+      return false;
+  return true;
+}
+#pragma omp end declare target
+
+int main (void)
+{
+  int data[N];
+  bool ok;
+  std::array<int,N> arr;
+
+  srand (time (NULL));
+  init (data);
+
+  #pragma omp target data map (to: data[:N]) map (alloc: arr)
+    {
+      #pragma omp target
+       {
+#ifndef MEM_SHARED
+         new (&arr) std::array<int,N> ();
+#endif
+         std::copy (data, data + N, arr.begin ());
+       }
+
+      #pragma omp target teams distribute parallel for
+       for (int i = 0; i < N; ++i)
+         arr[i] *= arr[i];
+
+      #pragma omp target map (from: ok)
+       {
+         ok = validate (arr, data);
+#ifndef MEM_SHARED
+         arr.~array ();
+#endif
+       }
+    }
+
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__bitset-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__bitset-concurrent.C
new file mode 100644
index 00000000000..aa27662314b
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__bitset-concurrent.C
@@ -0,0 +1,67 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <bitset>
+#include <set>
+#include <algorithm>
+
+#define N 4000
+#define MAX 16384
+
+void init (int data[])
+{
+  std::set<int> _set;
+  for (int i = 0; i < N; ++i)
+    {
+      // Avoid duplicates in data array.
+      do
+       data[i] = rand () % MAX;
+      while (_set.find (data[i]) != _set.end ());
+      _set.insert (data[i]);
+    }
+}
+
+bool validate (int sum, int data[])
+{
+  int total = 0;
+  for (int i = 0; i < N; ++i)
+    total += data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int data[N];
+  std::bitset<MAX> _set;
+  int sum = 0;
+
+  srand (time (NULL));
+  init (data);
+
+  #pragma omp target data map (to: data[:N]) map (alloc: _set)
+    {
+      #pragma omp target
+       {
+#ifndef MEM_SHARED
+         new (&_set) std::bitset<MAX> ();
+#endif
+         for (int i = 0; i < N; ++i)
+           _set[data[i]] = true;
+       }
+
+      #pragma omp target teams distribute parallel for reduction (+:sum)
+       for (int i = 0; i < MAX; ++i)
+         if (_set[i])
+           sum += i;
+
+#ifndef MEM_SHARED
+      #pragma omp target
+       _set.~bitset ();
+#endif
+    }
+
+  bool ok = validate (sum, data);
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__deque-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__deque-concurrent.C
new file mode 100644
index 00000000000..5f08bfb002b
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__deque-concurrent.C
@@ -0,0 +1,62 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <deque>
+#include <algorithm>
+
+#define N 50000
+
+void init (int data[])
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = rand ();
+}
+
+#pragma omp declare target
+bool validate (const std::deque<int> &_deque, int data[])
+{
+  for (int i = 0; i < N; ++i)
+    if (_deque[i] != data[i] * data[i])
+      return false;
+  return true;
+}
+#pragma omp end declare target
+
+int main (void)
+{
+  int data[N];
+  bool ok;
+
+  srand (time (NULL));
+  init (data);
+
+#ifdef MEM_SHARED
+  std::deque<int> _deque (std::begin (data), std::end (data));
+#else
+  std::deque<int> _deque;
+#endif
+
+  #pragma omp target data map (to: data[:N]) map (alloc: _deque)
+    {
+#ifndef MEM_SHARED
+      #pragma omp target
+       new (&_deque) std::deque<int> (std::begin (data), std::end (data));
+#endif
+
+      #pragma omp target teams distribute parallel for
+       for (int i = 0; i < N; ++i)
+         _deque[i] *= _deque[i];
+
+      #pragma omp target map (from: ok)
+       {
+         ok = validate (_deque, data);
+#ifndef MEM_SHARED
+         _deque.~deque ();
+#endif
+       }
+    }
+
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__flat_map-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__flat_map-concurrent.C
new file mode 100644
index 00000000000..9e59907c09b
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__flat_map-concurrent.C
@@ -0,0 +1,71 @@
+// { dg-do run }
+// { dg-additional-options "-std=c++23" }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+/* { dg-ice {TODO PR120450} { offload_target_amdgcn && { ! 
offload_device_shared_as } } }
+   { dg-excess-errors {'mkoffload' failure etc.} { xfail { 
offload_target_amdgcn && { ! offload_device_shared_as } } } }
+   (For effective-target 'offload_device_shared_as', we've got '-DMEM_SHARED', 
and therefore don't invoke the constructor with placement new.)  */
+
+#include <stdlib.h>
+#include <time.h>
+#include <set>
+#include <flat_map>
+
+#define N 3000
+
+void init (int data[], bool unique)
+{
+  std::set<int> _set;
+  for (int i = 0; i < N; ++i)
+    {
+      // Avoid duplicates in data array if unique is true.
+      do
+       data[i] = rand ();
+      while (unique && _set.count (data[i]) > 0);
+      _set.insert (data[i]);
+    }
+}
+
+bool validate (long long sum, int keys[], int data[])
+{
+  long long total = 0;
+  for (int i = 0; i < N; ++i)
+    total += (long long) keys[i] * data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int keys[N], data[N];
+  std::flat_map<int,int> _map;
+
+  srand (time (NULL));
+  init (keys, true);
+  init (data, false);
+
+  #pragma omp target enter data map (to: keys[:N], data[:N]) map (alloc: _map)
+
+  #pragma omp target
+    {
+#ifndef MEM_SHARED
+      new (&_map) std::flat_map<int,int> ();
+#endif
+      for (int i = 0; i < N; ++i)
+       _map[keys[i]] = data[i];
+    }
+
+  long long sum = 0;
+  #pragma omp target teams distribute parallel for reduction (+:sum)
+    for (int i = 0; i < N; ++i)
+      sum += (long long) keys[i] * _map[keys[i]];
+
+#ifndef MEM_SHARED
+  #pragma omp target
+    _map.~flat_map ();
+#endif
+
+  #pragma omp target exit data map (release: _map)
+
+  bool ok = validate (sum, keys, data);
+  return ok ? 0 : 1;
+}
diff --git 
a/libgomp/testsuite/libgomp.c++/target-std__flat_multimap-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__flat_multimap-concurrent.C
new file mode 100644
index 00000000000..1dc60c8a627
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__flat_multimap-concurrent.C
@@ -0,0 +1,70 @@
+// { dg-do run }
+// { dg-additional-options "-std=c++23" }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+/* { dg-ice {TODO PR120450} { offload_target_amdgcn && { ! 
offload_device_shared_as } } }
+   { dg-excess-errors {'mkoffload' failure etc.} { xfail { 
offload_target_amdgcn && { ! offload_device_shared_as } } } }
+   (For effective-target 'offload_device_shared_as', we've got '-DMEM_SHARED', 
and therefore don't invoke the constructor with placement new.)  */
+
+#include <stdlib.h>
+#include <time.h>
+#include <flat_map>
+
+// Make sure that KEY_MAX is less than N to ensure some duplicate keys.
+#define N 3000
+#define KEY_MAX 1000
+
+void init (int data[], int max)
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = i % max;
+}
+
+bool validate (long long sum, int keys[], int data[])
+{
+  long long total = 0;
+  for (int i = 0; i < N; ++i)
+    total += (long long) keys[i] * data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int keys[N], data[N];
+  std::flat_multimap<int,int> _map;
+
+  srand (time (NULL));
+  init (keys, KEY_MAX);
+  init (data, RAND_MAX);
+
+  #pragma omp target enter data map (to: keys[:N], data[:N]) map (alloc: _map)
+
+  #pragma omp target
+    {
+#ifndef MEM_SHARED
+      new (&_map) std::flat_multimap<int,int> ();
+#endif
+      for (int i = 0; i < N; ++i)
+       _map.insert({keys[i], data[i]});
+    }
+
+  long long sum = 0;
+  #pragma omp target teams distribute parallel for reduction (+:sum)
+    for (int i = 0; i < KEY_MAX; ++i)
+      {
+       auto range = _map.equal_range (i);
+       for (auto it = range.first; it != range.second; ++it) {
+         sum += (long long) it->first * it->second;
+       }
+      }
+
+#ifndef MEM_SHARED
+  #pragma omp target
+    _map.~flat_multimap ();
+#endif
+
+  #pragma omp target exit data map (release: _map)
+
+  bool ok = validate (sum, keys, data);
+  return ok ? 0 : 1;
+}
diff --git 
a/libgomp/testsuite/libgomp.c++/target-std__flat_multiset-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__flat_multiset-concurrent.C
new file mode 100644
index 00000000000..59b59bff694
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__flat_multiset-concurrent.C
@@ -0,0 +1,60 @@
+// { dg-do run }
+// { dg-additional-options "-std=c++23" }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <flat_set>
+#include <algorithm>
+
+// MAX should be less than N to ensure that some duplicates occur.
+#define N 4000
+#define MAX 1000
+
+void init (int data[])
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = rand () % MAX;
+}
+
+bool validate (int sum, int data[])
+{
+  int total = 0;
+  for (int i = 0; i < N; ++i)
+    total += data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int data[N];
+  std::flat_multiset<int> set;
+  int sum = 0;
+
+  srand (time (NULL));
+  init (data);
+
+  #pragma omp target data map (to: data[:N]) map (alloc: set)
+    {
+      #pragma omp target
+       {
+#ifndef MEM_SHARED
+         new (&set) std::flat_multiset<int> ();
+#endif
+         for (int i = 0; i < N; ++i)
+           set.insert (data[i]);
+       }
+
+      #pragma omp target teams distribute parallel for reduction (+:sum)
+       for (int i = 0; i < MAX; ++i)
+         sum += i * set.count (i);
+
+#ifndef MEM_SHARED
+      #pragma omp target
+       set.~flat_multiset ();
+#endif
+    }
+
+  bool ok = validate (sum, data);
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__flat_set-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__flat_set-concurrent.C
new file mode 100644
index 00000000000..b255cd5dca6
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__flat_set-concurrent.C
@@ -0,0 +1,67 @@
+// { dg-do run }
+// { dg-additional-options "-std=c++23" }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <flat_set>
+#include <algorithm>
+
+#define N 4000
+#define MAX 16384
+
+void init (int data[])
+{
+  std::flat_set<int> _set;
+  for (int i = 0; i < N; ++i)
+    {
+      // Avoid duplicates in data array.
+      do
+       data[i] = rand () % MAX;
+      while (_set.count (data[i]) != 0);
+      _set.insert (data[i]);
+    }
+}
+
+bool validate (int sum, int data[])
+{
+  int total = 0;
+  for (int i = 0; i < N; ++i)
+    total += data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int data[N];
+  std::flat_set<int> _set;
+  int sum = 0;
+
+  srand (time (NULL));
+  init (data);
+
+  #pragma omp target data map (to: data[:N]) map (alloc: _set)
+    {
+      #pragma omp target
+       {
+#ifndef MEM_SHARED
+         new (&_set) std::flat_set<int> ();
+#endif
+         for (int i = 0; i < N; ++i)
+           _set.insert (data[i]);
+       }
+
+      #pragma omp target teams distribute parallel for reduction (+:sum)
+       for (int i = 0; i < MAX; ++i)
+         if (_set.count (i) > 0)
+           sum += i;
+
+#ifndef MEM_SHARED
+      #pragma omp target
+       _set.~flat_set ();
+#endif
+    }
+
+  bool ok = validate (sum, data);
+  return ok ? 0 : 1;
+}
diff --git 
a/libgomp/testsuite/libgomp.c++/target-std__forward_list-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__forward_list-concurrent.C
new file mode 100644
index 00000000000..f8ab53e30df
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__forward_list-concurrent.C
@@ -0,0 +1,82 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <omp.h>
+#include <iterator>
+#include <forward_list>
+#include <algorithm>
+
+#define N 3000
+
+void init (int data[])
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = rand ();
+}
+
+#pragma omp declare target
+bool validate (const std::forward_list<int> &list, int data[])
+{
+  int i = 0;
+  for (auto &v : list)
+    {
+      if (v != data[i] * data[i])
+       return false;
+      ++i;
+    }
+  return true;
+}
+#pragma omp end declare target
+
+int main (void)
+{
+  int data[N];
+  bool ok;
+
+  srand (time (NULL));
+  init (data);
+
+#ifdef MEM_SHARED
+  std::forward_list<int> list (std::begin (data), std::end (data));
+#else
+  std::forward_list<int> list;
+#endif
+
+  #pragma omp target data map (to: data[:N]) map (alloc: list)
+    {
+#ifndef MEM_SHARED
+      #pragma omp target
+       new (&list) std::forward_list<int> (std::begin (data), std::end (data));
+#endif
+
+      #pragma omp target teams
+       do
+         {
+           int len = N / omp_get_num_teams () + (N % omp_get_num_teams () > 0);
+           int start = len * omp_get_team_num ();
+           if (start >= N)
+             break;
+           if (start + len >= N)
+             len = N - start;
+           auto it = list.begin ();
+           std::advance (it, start);
+           for (int i = 0; i < len; ++i)
+             {
+               *it *= *it;
+               ++it;
+             }
+         } while (false);
+
+      #pragma omp target map (from: ok)
+       {
+         ok = validate (list, data);
+#ifndef MEM_SHARED
+         list.~forward_list ();
+#endif
+       }
+    }
+
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__list-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__list-concurrent.C
new file mode 100644
index 00000000000..ce3b4265ee0
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__list-concurrent.C
@@ -0,0 +1,82 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <omp.h>
+#include <iterator>
+#include <list>
+#include <algorithm>
+
+#define N 3000
+
+void init (int data[])
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = rand ();
+}
+
+#pragma omp declare target
+bool validate (const std::list<int> &_list, int data[])
+{
+  int i = 0;
+  for (auto &v : _list)
+    {
+      if (v != data[i] * data[i])
+       return false;
+      ++i;
+    }
+  return true;
+}
+#pragma omp end declare target
+
+int main (void)
+{
+  int data[N];
+  bool ok;
+
+  srand (time (NULL));
+  init (data);
+
+#ifdef MEM_SHARED
+  std::list<int> _list (std::begin (data), std::end (data));
+#else
+  std::list<int> _list;
+#endif
+
+  #pragma omp target data map (to: data[:N]) map (alloc: _list)
+    {
+#ifndef MEM_SHARED
+      #pragma omp target
+       new (&_list) std::list<int> (std::begin (data), std::end (data));
+#endif
+
+      #pragma omp target teams
+       do
+         {
+           int len = N / omp_get_num_teams () + (N % omp_get_num_teams () > 0);
+           int start = len * omp_get_team_num ();
+           if (start >= N)
+             break;
+           if (start + len >= N)
+             len = N - start;
+           auto it = _list.begin ();
+           std::advance (it, start);
+           for (int i = 0; i < len; ++i)
+             {
+               *it *= *it;
+               ++it;
+             }
+         } while (false);
+
+      #pragma omp target map (from: ok)
+       {
+         ok = validate (_list, data);
+#ifndef MEM_SHARED
+         _list.~list ();
+#endif
+       }
+    }
+
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__map-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__map-concurrent.C
new file mode 100644
index 00000000000..254c490afdb
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__map-concurrent.C
@@ -0,0 +1,66 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <set>
+#include <map>
+
+#define N 3000
+
+void init (int data[], bool unique)
+{
+  std::set<int> _set;
+  for (int i = 0; i < N; ++i)
+    {
+      // Avoid duplicates in data array if unique is true.
+      do
+       data[i] = rand ();
+      while (unique && _set.find (data[i]) != _set.end ());
+      _set.insert (data[i]);
+    }
+}
+
+bool validate (long long sum, int keys[], int data[])
+{
+  long long total = 0;
+  for (int i = 0; i < N; ++i)
+    total += (long long) keys[i] * data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int keys[N], data[N];
+  std::map<int,int> _map;
+
+  srand (time (NULL));
+  init (keys, true);
+  init (data, false);
+
+  #pragma omp target enter data map (to: keys[:N], data[:N]) map (alloc: _map)
+
+  #pragma omp target
+    {
+#ifndef MEM_SHARED
+      new (&_map) std::map<int,int> ();
+#endif
+      for (int i = 0; i < N; ++i)
+       _map[keys[i]] = data[i];
+    }
+
+  long long sum = 0;
+  #pragma omp target teams distribute parallel for reduction (+:sum)
+    for (int i = 0; i < N; ++i)
+      sum += (long long) keys[i] * _map[keys[i]];
+
+#ifndef MEM_SHARED
+  #pragma omp target
+    _map.~map ();
+#endif
+
+  #pragma omp target exit data map (release: _map)
+
+  bool ok = validate (sum, keys, data);
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__multimap-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__multimap-concurrent.C
new file mode 100644
index 00000000000..65e6732cba3
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__multimap-concurrent.C
@@ -0,0 +1,64 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <map>
+
+// Make sure that KEY_MAX is less than N to ensure some duplicate keys.
+#define N 3000
+#define KEY_MAX 1000
+
+void init (int data[], int max)
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = rand () % max;
+}
+
+bool validate (long long sum, int keys[], int data[])
+{
+  long long total = 0;
+  for (int i = 0; i < N; ++i)
+    total += (long long) keys[i] * data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int keys[N], data[N];
+  std::multimap<int,int> _map;
+
+  srand (time (NULL));
+  init (keys, KEY_MAX);
+  init (data, RAND_MAX);
+
+  #pragma omp target enter data map (to: keys[:N], data[:N]) map (alloc: _map)
+
+  #pragma omp target
+    {
+#ifndef MEM_SHARED
+      new (&_map) std::multimap<int,int> ();
+#endif
+      for (int i = 0; i < N; ++i)
+       _map.insert({keys[i], data[i]});
+    }
+
+  long long sum = 0;
+  #pragma omp target teams distribute parallel for reduction (+:sum)
+    for (int i = 0; i < KEY_MAX; ++i)
+      {
+       auto range = _map.equal_range (i);
+       for (auto it = range.first; it != range.second; ++it)
+         sum += (long long) it->first * it->second;
+      }
+
+#ifndef MEM_SHARED
+  #pragma omp target
+    _map.~multimap ();
+#endif
+
+  #pragma omp target exit data map (release: _map)
+
+  bool ok = validate (sum, keys, data);
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__multiset-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__multiset-concurrent.C
new file mode 100644
index 00000000000..0b16ca53953
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__multiset-concurrent.C
@@ -0,0 +1,60 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <time.h>
+#include <set>
+#include <algorithm>
+
+// MAX should be less than N to ensure that some duplicates occur.
+#define N 4000
+#define MAX 1000
+
+void init (int data[])
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = rand () % MAX;
+}
+
+bool validate (int sum, int data[])
+{
+  int total = 0;
+  for (int i = 0; i < N; ++i)
+    total += data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int data[N];
+  std::multiset<int> set;
+  int sum = 0;
+
+  srand (time (NULL));
+  init (data);
+
+  #pragma omp target data map (to: data[:N]) map (alloc: set)
+    {
+      #pragma omp target
+       {
+#ifndef MEM_SHARED
+         new (&set) std::multiset<int> ();
+#endif
+         for (int i = 0; i < N; ++i)
+           set.insert (data[i]);
+       }
+
+      #pragma omp target teams distribute parallel for reduction (+:sum)
+       for (int i = 0; i < MAX; ++i)
+         sum += i * set.count (i);
+
+#ifndef MEM_SHARED
+      #pragma omp target
+       set.~multiset ();
+#endif
+    }
+
+  bool ok = validate (sum, data);
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__set-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__set-concurrent.C
new file mode 100644
index 00000000000..6953b6352a2
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__set-concurrent.C
@@ -0,0 +1,66 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <set>
+#include <algorithm>
+
+#define N 4000
+#define MAX 16384
+
+void init (int data[])
+{
+  std::set<int> _set;
+  for (int i = 0; i < N; ++i)
+    {
+      // Avoid duplicates in data array.
+      do
+       data[i] = rand () % MAX;
+      while (_set.find (data[i]) != _set.end ());
+      _set.insert (data[i]);
+    }
+}
+
+bool validate (int sum, int data[])
+{
+  int total = 0;
+  for (int i = 0; i < N; ++i)
+    total += data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int data[N];
+  std::set<int> _set;
+  int sum = 0;
+
+  srand (time (NULL));
+  init (data);
+
+  #pragma omp target data map (to: data[:N]) map (alloc: _set)
+    {
+      #pragma omp target
+       {
+#ifndef MEM_SHARED
+         new (&_set) std::set<int> ();
+#endif
+         for (int i = 0; i < N; ++i)
+           _set.insert (data[i]);
+       }
+
+      #pragma omp target teams distribute parallel for reduction (+:sum)
+       for (int i = 0; i < MAX; ++i)
+         if (_set.find (i) != _set.end ())
+           sum += i;
+
+#ifndef MEM_SHARED
+      #pragma omp target
+       _set.~set ();
+#endif
+    }
+
+  bool ok = validate (sum, data);
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__span-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__span-concurrent.C
new file mode 100644
index 00000000000..ac89a8980c6
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__span-concurrent.C
@@ -0,0 +1,62 @@
+// { dg-do run }
+// { dg-additional-options "-std=c++20" }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <span>
+
+#define N 64
+
+void init (int data[])
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = rand ();
+}
+
+#pragma omp declare target
+bool validate (const std::span<int, N> &span, int data[])
+{
+  for (int i = 0; i < N; ++i)
+    if (span[i] != data[i] * data[i])
+      return false;
+  return true;
+}
+#pragma omp end declare target
+
+int main (void)
+{
+  int data[N];
+  bool ok;
+  int elements[N];
+  std::span<int, N> span(elements);
+
+  srand (time (NULL));
+  init (data);
+
+  #pragma omp target enter data map (to: data[:N]) map (alloc: elements, span)
+
+  #pragma omp target
+    {
+#ifndef MEM_SHARED
+      new (&span) std::span<int, N> (elements);
+#endif
+      std::copy (data, data + N, span.begin ());
+    }
+
+  #pragma omp target teams distribute parallel for
+    for (int i = 0; i < N; ++i)
+      span[i] *= span[i];
+
+  #pragma omp target map (from: ok)
+    {
+      ok = validate (span, data);
+#ifndef MEM_SHARED
+      span.~span ();
+#endif
+    }
+
+  #pragma omp target exit data map (release: elements, span)
+
+  return ok ? 0 : 1;
+}
diff --git 
a/libgomp/testsuite/libgomp.c++/target-std__unordered_map-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__unordered_map-concurrent.C
new file mode 100644
index 00000000000..00d794310aa
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__unordered_map-concurrent.C
@@ -0,0 +1,66 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <set>
+#include <unordered_map>
+
+#define N 3000
+
+void init (int data[], bool unique)
+{
+  std::set<int> _set;
+  for (int i = 0; i < N; ++i)
+    {
+      // Avoid duplicates in data array if unique is true.
+      do
+       data[i] = rand ();
+      while (unique && _set.count (data[i]) > 0);
+      _set.insert (data[i]);
+    }
+}
+
+bool validate (long long sum, int keys[], int data[])
+{
+  long long total = 0;
+  for (int i = 0; i < N; ++i)
+    total += (long long) keys[i] * data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int keys[N], data[N];
+  std::unordered_map<int,int> _map;
+
+  srand (time (NULL));
+  init (keys, true);
+  init (data, false);
+
+  #pragma omp target enter data map (to: keys[:N], data[:N]) map (alloc: _map)
+
+  #pragma omp target
+    {
+#ifndef MEM_SHARED
+      new (&_map) std::unordered_map<int,int> ();
+#endif
+      for (int i = 0; i < N; ++i)
+       _map[keys[i]] = data[i];
+    }
+
+  long long sum = 0;
+  #pragma omp target teams distribute parallel for reduction (+:sum)
+    for (int i = 0; i < N; ++i)
+      sum += (long long) keys[i] * _map[keys[i]];
+
+#ifndef MEM_SHARED
+  #pragma omp target
+    _map.~unordered_map ();
+#endif
+
+  #pragma omp target exit data map (release: _map)
+
+  bool ok = validate (sum, keys, data);
+  return ok ? 0 : 1;
+}
diff --git 
a/libgomp/testsuite/libgomp.c++/target-std__unordered_multimap-concurrent.C 
b/libgomp/testsuite/libgomp.c++/target-std__unordered_multimap-concurrent.C
new file mode 100644
index 00000000000..2567634bcda
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__unordered_multimap-concurrent.C
@@ -0,0 +1,65 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <unordered_map>
+
+// Make sure that KEY_MAX is less than N to ensure some duplicate keys.
+#define N 3000
+#define KEY_MAX 1000
+
+void init (int data[], int max)
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = i % max;
+}
+
+bool validate (long long sum, int keys[], int data[])
+{
+  long long total = 0;
+  for (int i = 0; i < N; ++i)
+    total += (long long) keys[i] * data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int keys[N], data[N];
+  std::unordered_multimap<int,int> _map;
+
+  srand (time (NULL));
+  init (keys, KEY_MAX);
+  init (data, RAND_MAX);
+
+  #pragma omp target enter data map (to: keys[:N], data[:N]) map (alloc: _map)
+
+  #pragma omp target
+    {
+#ifndef MEM_SHARED
+      new (&_map) std::unordered_multimap<int,int> ();
+#endif
+      for (int i = 0; i < N; ++i)
+       _map.insert({keys[i], data[i]});
+    }
+
+  long long sum = 0;
+  #pragma omp target teams distribute parallel for reduction (+:sum)
+    for (int i = 0; i < KEY_MAX; ++i)
+      {
+       auto range = _map.equal_range (i);
+       for (auto it = range.first; it != range.second; ++it) {
+         sum += (long long) it->first * it->second;
+       }
+      }
+
+#ifndef MEM_SHARED
+  #pragma omp target
+    _map.~unordered_multimap ();
+#endif
+
+  #pragma omp target exit data map (release: _map)
+
+  bool ok = validate (sum, keys, data);
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__unordered_multiset-concurrent.C b/libgomp/testsuite/libgomp.c++/target-std__unordered_multiset-concurrent.C
new file mode 100644
index 00000000000..da6c8755abe
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__unordered_multiset-concurrent.C
@@ -0,0 +1,59 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <unordered_set>
+#include <algorithm>
+
+// MAX should be less than N to ensure that some duplicates occur.
+#define N 4000
+#define MAX 1000
+
+void init (int data[])
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = rand () % MAX;
+}
+
+bool validate (int sum, int data[])
+{
+  int total = 0;
+  for (int i = 0; i < N; ++i)
+    total += data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int data[N];
+  std::unordered_multiset<int> set;
+  int sum = 0;
+
+  srand (time (NULL));
+  init (data);
+
+  #pragma omp target data map (to: data[:N]) map (alloc: set)
+    {
+      #pragma omp target
+       {
+#ifndef MEM_SHARED
+         new (&set) std::unordered_multiset<int> ();
+#endif
+         for (int i = 0; i < N; ++i)
+           set.insert (data[i]);
+       }
+
+      #pragma omp target teams distribute parallel for reduction (+:sum)
+       for (int i = 0; i < MAX; ++i)
+         sum += i * set.count (i);
+
+#ifndef MEM_SHARED
+      #pragma omp target
+       set.~unordered_multiset ();
+#endif
+    }
+
+  bool ok = validate (sum, data);
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__unordered_set-concurrent.C b/libgomp/testsuite/libgomp.c++/target-std__unordered_set-concurrent.C
new file mode 100644
index 00000000000..b7bd935748f
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__unordered_set-concurrent.C
@@ -0,0 +1,66 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <unordered_set>
+#include <algorithm>
+
+#define N 4000
+#define MAX 16384
+
+void init (int data[])
+{
+  std::unordered_set<int> _set;
+  for (int i = 0; i < N; ++i)
+    {
+      // Avoid duplicates in data array.
+      do
+       data[i] = rand () % MAX;
+      while (_set.count (data[i]) != 0);
+      _set.insert (data[i]);
+    }
+}
+
+bool validate (int sum, int data[])
+{
+  int total = 0;
+  for (int i = 0; i < N; ++i)
+    total += data[i];
+  return sum == total;
+}
+
+int main (void)
+{
+  int data[N];
+  std::unordered_set<int> _set;
+  int sum = 0;
+
+  srand (time (NULL));
+  init (data);
+
+  #pragma omp target data map (to: data[:N]) map (alloc: _set)
+    {
+      #pragma omp target
+       {
+#ifndef MEM_SHARED
+         new (&_set) std::unordered_set<int> ();
+#endif
+         for (int i = 0; i < N; ++i)
+           _set.insert (data[i]);
+       }
+
+      #pragma omp target teams distribute parallel for reduction (+:sum)
+       for (int i = 0; i < MAX; ++i)
+         if (_set.count (i) > 0)
+           sum += i;
+
+#ifndef MEM_SHARED
+      #pragma omp target
+       _set.~unordered_set ();
+#endif
+    }
+
+  bool ok = validate (sum, data);
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__valarray-concurrent.C b/libgomp/testsuite/libgomp.c++/target-std__valarray-concurrent.C
new file mode 100644
index 00000000000..127aec436f5
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__valarray-concurrent.C
@@ -0,0 +1,64 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <valarray>
+
+#define N 50000
+
+void init (int data[])
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = rand ();
+}
+
+#pragma omp declare target
+bool validate (const std::valarray<int> &arr, int data[])
+{
+  for (int i = 0; i < N; ++i)
+    if (arr[i] != data[i] * data[i] + i)
+      return false;
+  return true;
+}
+#pragma omp end declare target
+
+int main (void)
+{
+  int data[N];
+  bool ok;
+
+  srand (time (NULL));
+  init (data);
+
+#ifdef MEM_SHARED
+  std::valarray<int> arr (data, N);
+#else
+  std::valarray<int> arr;
+#endif
+
+  #pragma omp target data map (to: data[:N]) map (alloc: arr)
+    {
+      #pragma omp target
+       {
+#ifndef MEM_SHARED
+         new (&arr) std::valarray<int> (data, N);
+#endif
+         arr *= arr;
+       }
+
+      #pragma omp target teams distribute parallel for
+       for (int i = 0; i < N; ++i)
+         arr[i] += i;
+
+      #pragma omp target map (from: ok)
+       {
+         ok = validate (arr, data);
+#ifndef MEM_SHARED
+         arr.~valarray ();
+#endif
+       }
+    }
+
+  return ok ? 0 : 1;
+}
diff --git a/libgomp/testsuite/libgomp.c++/target-std__vector-concurrent.C b/libgomp/testsuite/libgomp.c++/target-std__vector-concurrent.C
new file mode 100644
index 00000000000..1367f965527
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/target-std__vector-concurrent.C
@@ -0,0 +1,61 @@
+// { dg-do run }
+// { dg-additional-options -DMEM_SHARED { target offload_device_shared_as } }
+
+#include <stdlib.h>
+#include <time.h>
+#include <vector>
+
+#define N 50000
+
+void init (int data[])
+{
+  for (int i = 0; i < N; ++i)
+    data[i] = rand ();
+}
+
+#pragma omp declare target
+bool validate (const std::vector<int> &vec, int data[])
+{
+  for (int i = 0; i < N; ++i)
+    if (vec[i] != data[i] * data[i])
+      return false;
+  return true;
+}
+#pragma omp end declare target
+
+int main (void)
+{
+  int data[N];
+  bool ok;
+
+  srand (time (NULL));
+  init (data);
+
+#ifdef MEM_SHARED
+  std::vector<int> vec (data, data + N);
+#else
+  std::vector<int> vec;
+#endif
+
+  #pragma omp target data map (to: data[:N]) map (alloc: vec)
+    {
+#ifndef MEM_SHARED
+      #pragma omp target
+       new (&vec) std::vector<int> (data, data + N);
+#endif
+
+      #pragma omp target teams distribute parallel for
+       for (int i = 0; i < N; ++i)
+         vec[i] *= vec[i];
+
+      #pragma omp target map (from: ok)
+       {
+         ok = validate (vec, data);
+#ifndef MEM_SHARED
+         vec.~vector ();
+#endif
+       }
+    }
+
+  return ok ? 0 : 1;
+}
-- 
2.34.1

Reply via email to