https://gcc.gnu.org/g:515d9be7944e89f5ec4363f9816ad4031ab6394b

commit r16-109-g515d9be7944e89f5ec4363f9816ad4031ab6394b
Author: Tobias Burnus <tbur...@baylibre.com>
Date:   Thu Apr 24 14:36:37 2025 +0200

    libgomp: Add additional OpenMP interop runtime tests
    
    Add checks for nowait/depend and checks that the returned
    CUDA, CUDA_DRIVER and HIP interop objects actually work.
    
    While the CUDA/CUDA_DRIVER ones are only for Nvidia GPUs, HIP
    works on both AMD and Nvidia GPUs; on Nvidia GPUs, it is a
    very thin wrapper around CUDA.
    
    For Fortran, only a HIP test has been added - using hipfort.
    
    While libgomp.c-c++-common/interop-2.c always works - even without a
    GPU - and checks the depend/nowait handling, all other tests require
    that the runtime libraries are found at link (and execution) time:
    for Nvidia GPUs, libcuda plus libcudart or libcublas;
    for AMD GPUs, libamdhip64 or libhipblas.
    
    The header files and the hipfort module do not need to be present, as a
    fallback has been implemented; if they are present, they are used.
    
    Due to the combinations, the basic 1x C/C++, 4x C and 1x Fortran tests
    yield 1x C/C++, 14x C and 4x Fortran run-test files.
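
    As a rough illustration (not part of the commit; function name, array
    name and size are made up), the nowait/depend pattern that interop-2.c
    exercises looks like this:

        #include <omp.h>

        void
        sketch (int dev)
        {
          int A[128] = { };
          omp_interop_t obj;

          /* Create a targetsync interop object for device 'dev'.  */
          #pragma omp interop device(dev) init(targetsync : obj)

          /* Update A asynchronously; the task has an out-dependence on A.  */
          #pragma omp target device(dev) map(A) depend(out: A) nowait
          for (int i = 0; i < 128; i++)
            A[i] += 1;

          /* The destroy construct carries an in-dependence on A; code after
             it therefore runs only once the asynchronous update finished.  */
          #pragma omp interop destroy(obj) depend(in: A)
        }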
    
    libgomp/ChangeLog:
    
            * testsuite/lib/libgomp.exp (check_effective_target_openacc_cublas,
            check_effective_target_openacc_cudart): Update description as
            the check requires more.
            (check_effective_target_openacc_libcuda,
            check_effective_target_openacc_libcublas,
            check_effective_target_openacc_libcudart,
            check_effective_target_gomp_hip_header_amd,
            check_effective_target_gomp_hip_header_nvidia,
            check_effective_target_gomp_hipfort_module,
            check_effective_target_gomp_libamdhip64,
            check_effective_target_gomp_libhipblas): New.
            * testsuite/libgomp.c-c++-common/interop-2.c: New test.
            * testsuite/libgomp.c/interop-cublas-full.c: New test.
            * testsuite/libgomp.c/interop-cublas-libonly.c: New test.
            * testsuite/libgomp.c/interop-cuda-full.c: New test.
            * testsuite/libgomp.c/interop-cuda-libonly.c: New test.
            * testsuite/libgomp.c/interop-hip-amd-full.c: New test.
            * testsuite/libgomp.c/interop-hip-amd-no-hip-header.c: New test.
            * testsuite/libgomp.c/interop-hip-nvidia-full.c: New test.
            * testsuite/libgomp.c/interop-hip-nvidia-no-headers.c: New test.
            * testsuite/libgomp.c/interop-hip-nvidia-no-hip-header.c: New test.
            * testsuite/libgomp.c/interop-hip.h: New test.
            * testsuite/libgomp.c/interop-hipblas-amd-full.c: New test.
            * testsuite/libgomp.c/interop-hipblas-amd-no-hip-header.c: New test.
            * testsuite/libgomp.c/interop-hipblas-nvidia-full.c: New test.
            * testsuite/libgomp.c/interop-hipblas-nvidia-no-headers.c: New test.
            * testsuite/libgomp.c/interop-hipblas-nvidia-no-hip-header.c: New test.
            * testsuite/libgomp.c/interop-hipblas.h: New test.
            * testsuite/libgomp.fortran/interop-hip-amd-full.F90: New test.
            * testsuite/libgomp.fortran/interop-hip-amd-no-module.F90: New test.
            * testsuite/libgomp.fortran/interop-hip-nvidia-full.F90: New test.
            * testsuite/libgomp.fortran/interop-hip-nvidia-no-module.F90: New test.
            * testsuite/libgomp.fortran/interop-hip.h: New test.

Diff:
---
 libgomp/testsuite/lib/libgomp.exp                  | 133 +++++++++++-
 libgomp/testsuite/libgomp.c-c++-common/interop-2.c | 129 ++++++++++++
 libgomp/testsuite/libgomp.c/interop-cublas-full.c  | 176 ++++++++++++++++
 .../testsuite/libgomp.c/interop-cublas-libonly.c   |   7 +
 libgomp/testsuite/libgomp.c/interop-cuda-full.c    | 159 ++++++++++++++
 libgomp/testsuite/libgomp.c/interop-cuda-libonly.c |   8 +
 libgomp/testsuite/libgomp.c/interop-hip-amd-full.c |   7 +
 .../libgomp.c/interop-hip-amd-no-hip-header.c      |   8 +
 .../testsuite/libgomp.c/interop-hip-nvidia-full.c  |   8 +
 .../libgomp.c/interop-hip-nvidia-no-headers.c      |  10 +
 .../libgomp.c/interop-hip-nvidia-no-hip-header.c   |   9 +
 libgomp/testsuite/libgomp.c/interop-hip.h          | 234 +++++++++++++++++++++
 .../testsuite/libgomp.c/interop-hipblas-amd-full.c |   7 +
 .../libgomp.c/interop-hipblas-amd-no-hip-header.c  |   8 +
 .../libgomp.c/interop-hipblas-nvidia-full.c        |   7 +
 .../libgomp.c/interop-hipblas-nvidia-no-headers.c  |   9 +
 .../interop-hipblas-nvidia-no-hip-header.c         |   8 +
 libgomp/testsuite/libgomp.c/interop-hipblas.h      | 228 ++++++++++++++++++++
 .../libgomp.fortran/interop-hip-amd-full.F90       |   7 +
 .../libgomp.fortran/interop-hip-amd-no-module.F90  |   6 +
 .../libgomp.fortran/interop-hip-nvidia-full.F90    |   9 +
 .../interop-hip-nvidia-no-module.F90               |   8 +
 libgomp/testsuite/libgomp.fortran/interop-hip.h    | 214 +++++++++++++++++++
 23 files changed, 1397 insertions(+), 2 deletions(-)

diff --git a/libgomp/testsuite/lib/libgomp.exp b/libgomp/testsuite/lib/libgomp.exp
index bc38e3ca6d98..a057394ca13f 100644
--- a/libgomp/testsuite/lib/libgomp.exp
+++ b/libgomp/testsuite/lib/libgomp.exp
@@ -553,7 +553,23 @@ int main() {
 } } "-lcuda" ]
 }
 
-# Return 1 if cublas_v2.h and -lcublas are available.
+# Return 1 if -lcuda is available (header not required).
+
+proc check_effective_target_openacc_libcuda { } {
+    return [check_no_compiler_messages openacc_libcuda executable {
+typedef enum { CUDA_SUCCESS } CUresult;
+typedef int CUdevice;
+CUresult cuDeviceGet (CUdevice *, int);
+int main() {
+    CUdevice dev;
+    CUresult r = cuDeviceGet (&dev, 0);
+    if (r != CUDA_SUCCESS)
+       return 1;
+    return 0;
+} } "-lcuda" ]
+}
+
+# Return 1 if cublas_v2.h, cuda.h, -lcublas and -lcuda are available.
 
 proc check_effective_target_openacc_cublas { } {
     return [check_no_compiler_messages openacc_cublas executable {
@@ -573,7 +589,25 @@ int main() {
 } } "-lcuda -lcublas" ]
 }
 
-# Return 1 if cuda_runtime_api.h and -lcudart are available.
+# Return 1 if -lcublas is available (header not required).
+
+proc check_effective_target_openacc_libcublas { } {
+    return [check_no_compiler_messages openacc_libcublas executable {
+typedef enum { CUBLAS_STATUS_SUCCESS } cublasStatus_t;
+typedef struct cublasContext* cublasHandle_t;
+#define cublasCreate cublasCreate_v2
+cublasStatus_t cublasCreate_v2 (cublasHandle_t *);
+int main() {
+    cublasStatus_t s;
+    cublasHandle_t h;
+    s = cublasCreate (&h);
+    if (s != CUBLAS_STATUS_SUCCESS)
+       return 1;
+    return 0;
+} } "-lcublas" ]
+}
+
+# Return 1 if cuda_runtime_api.h, cuda.h, -lcuda and -lcudart are available.
 
 proc check_effective_target_openacc_cudart { } {
     return [check_no_compiler_messages openacc_cudart executable {
@@ -592,3 +626,98 @@ int main() {
     return 0;
 } } "-lcuda -lcudart" ]
 }
+
+# Return 1 if -lcudart is available (no header required).
+
+proc check_effective_target_openacc_libcudart { } {
+    return [check_no_compiler_messages openacc_libcudart executable {
+typedef int cudaError_t;
+cudaError_t cudaGetDevice(int *);
+enum { cudaSuccess };
+int main() {
+    cudaError_t e;
+    int devn;
+    e = cudaGetDevice (&devn);
+    if (e != cudaSuccess)
+       return 1;
+    return 0;
+} } "-lcudart" ]
+}
+
+# Return 1 if hip/hip_runtime_api.h is available (no link check; AMD platform).
+
+proc check_effective_target_gomp_hip_header_amd { } {
+    return [check_no_compiler_messages gomp_hip_header_amd assembly {
+#define __HIP_PLATFORM_AMD__
+#include <hip/hip_runtime_api.h>
+int main() {
+    hipDevice_t dev;
+    hipError_t r = hipDeviceGet (&dev, 0);
+    if (r != hipSuccess)
+       return 1;
+    return 0;
+} }]
+}
+
+# Return 1 if hip/hip_runtime_api.h is available (no link check; Nvidia/CUDA platform).
+
+proc check_effective_target_gomp_hip_header_nvidia { } {
+    return [check_no_compiler_messages gomp_hip_header_nvidia assembly {
+#define __HIP_PLATFORM_NVIDIA__
+#include <hip/hip_runtime_api.h>
+int main() {
+    hipDevice_t dev;
+    hipError_t r = hipDeviceGet (&dev, 0);
+    if (r != hipSuccess)
+       return 1;
+    return 0;
+} }]
+}
+
+# Return 1 if the Fortran hipfort module is available (no link check).
+
+proc check_effective_target_gomp_hipfort_module { } {
+    return [check_no_compiler_messages gomp_hipfort_module assembly {
+! Fortran
+use hipfort
+implicit none
+integer(kind(hipSuccess)) :: r
+integer(c_int) :: dev
+r = hipDeviceGet (dev, 0)
+if (r /= hipSuccess) error stop
+end
+}]
+}
+
+# Return 1 if AMD HIP's -lamdhip64 is available (no header required).
+
+proc check_effective_target_gomp_libamdhip64 { } {
+    return [check_no_compiler_messages gomp_libamdhip64 executable {
+typedef int hipError_t;
+typedef int hipDevice_t;
+enum { hipSuccess = 0 };
+hipError_t hipDeviceGet(hipDevice_t*, int);
+int main() {
+    hipDevice_t dev;
+    hipError_t r = hipDeviceGet (&dev, 0);
+    if (r != hipSuccess)
+       return 1;
+    return 0;
+} } "-lamdhip64" ]
+}
+
+# Return 1 if HIP's -lhipblas is available (no header required).
+
+proc check_effective_target_gomp_libhipblas { } {
+    return [check_no_compiler_messages gomp_libhipblas executable {
+typedef enum { HIPBLAS_STATUS_SUCCESS = 0 } hipblasStatus_t;
+typedef void* hipblasHandle_t;
+hipblasStatus_t hipblasCreate (hipblasHandle_t*);
+int main() {
+    hipblasHandle_t handle;
+    hipblasStatus_t stat = hipblasCreate (&handle);
+    if (stat != HIPBLAS_STATUS_SUCCESS)
+       return 1;
+    return 0;
+} } "-lhipblas" ]
+}
diff --git a/libgomp/testsuite/libgomp.c-c++-common/interop-2.c b/libgomp/testsuite/libgomp.c-c++-common/interop-2.c
new file mode 100644
index 000000000000..a7526dcf41ea
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c-c++-common/interop-2.c
@@ -0,0 +1,129 @@
+/* { dg-do run } */
+/* { dg-additional-options "-lm" } */
+
+/* Note: At the time this program was written, Nvptx was not asynchronous
+   enough to trigger the issue (with a 'nowait' added); however, on
+   AMD GPUs, it triggered.  */
+
+/* Test whether nowait / dependency is handled correctly.
+   Motivated by OpenMP_VV's 5.1/interop/test_interop_target.c
+
+   The code actually only creates a streaming object without actually using it,
+   except for dependency tracking.
+
+   Note that there is a difference between having a streaming (targetsync) object
+   and not (= omp_interop_none); at least if one assumes that omp_interop_none
+   does not include 'targetsync' as (effective) interop type - in that case,
+   'nowait' has no effect and the 'depend' is active as included task, otherwise
+   the code continues with the depend being active only for the about to be
+   destroyed or used thread.
+
+   The OpenMP spec states (here 6.0):
+     "If the interop-type set includes 'targetsync', an empty mergeable task is
+      generated.  If the 'nowait' clause is not present on the construct then
+      the task is also an included task. If the interop-type set does not
+      include 'targetsync', the 'nowait' clause has no effect.  Any depend
+      clauses that are present on the construct apply to the generated task.  */
+
+#include <omp.h>
+
+void
+test_async (const int dev)
+{
+  constexpr int N = 2048;
+  constexpr int ulp = 4;
+  constexpr double M_PI = 2.0 * __builtin_acos (0.0);
+  omp_interop_t obj1, obj2;
+  double A[N] = { };
+  int B[N] = { };
+
+  /* Create interop object.  */
+  #pragma omp interop device(dev) init(targetsync : obj1, obj2)
+
+  if (dev == omp_initial_device || dev == omp_get_num_devices ())
+    {
+      if (obj1 != omp_interop_none || obj2 != omp_interop_none)
+       __builtin_abort ();
+    }
+  else
+    {
+      if (obj1 == omp_interop_none || obj2 == omp_interop_none)
+       __builtin_abort ();
+    }
+
+  /* DOUBLE */
+
+  /* Now in the background update it, slowly enough that the
+     code afterwards is reached while still running asynchronously.
+     As OpenMP_VV's Issue #863 shows, the overhead is high enough to
+     fail even when only doing an atomic integer increment.  */
+
+  #pragma omp target device(dev) map(A) depend(out: A[:N]) nowait
+  for (int i = 0; i < N; i++)
+    #pragma omp atomic update
+    A[i] += __builtin_sin (2*i*M_PI/N);
+
+  /* DESTROY takes care of the dependency such that ... */
+
+  if (obj1 == omp_interop_none)
+    {
+      // Same as below as 'nowait' is ignored.
+      #pragma omp interop destroy(obj1) depend(in: A[:N]) nowait
+    }
+  else
+    {
+      #pragma omp interop destroy(obj1) depend(in: A[:N])
+    }
+
+  /* ... this code is only executed once the dependency has been fulfilled.  */
+
+  /* Check the value - part I: quick, avoid A[0] == sin(0) = 0.  */
+  for (int i = 1; i < N; i++)
+    if (A[i] == 0.0)
+      __builtin_abort ();
+
+  /* Check the value - part II: thoroughly.  */
+  for (int i = 0; i < N; i++)
+    {
+      double x = A[i];
+      double y = __builtin_sin (2*i*M_PI/N);
+      if (__builtin_fabs (x - y) > ulp * __builtin_fabs (x+y) * __DBL_EPSILON__)
+       __builtin_abort ();
+    }
+
+  /* Integer */
+
+  #pragma omp target device(dev) map(B) depend(out: B[:N]) nowait
+  for (int i = 0; i < N; i++)
+    #pragma omp atomic update
+    B[i] += 42;
+
+  /* Same - but using USE.  */
+  if (obj2 == omp_interop_none)
+    {
+      // Same as below as 'nowait' is ignored.
+      #pragma omp interop use(obj2) depend(in: B[:N]) nowait
+    }
+  else
+    {
+      #pragma omp interop use(obj2) depend(in: B[:N])
+    }
+
+  for (int i = 0; i < N; i++)
+    if (B[i] != 42)
+      __builtin_abort ();
+
+  #pragma omp interop destroy(obj2)
+}
+
+int
+main ()
+{
+  int ndev = omp_get_num_devices ();
+
+  for (int dev = 0; dev <= ndev; dev++)
+    test_async (dev);
+  test_async (omp_initial_device);
+
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/interop-cublas-full.c b/libgomp/testsuite/libgomp.c/interop-cublas-full.c
new file mode 100644
index 000000000000..2df52771a139
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-cublas-full.c
@@ -0,0 +1,176 @@
+/* { dg-require-effective-target openacc_cublas } */
+/* { dg-additional-options "-lcublas" } */
+
+/* NOTE: This file is also included by libgomp.c/interop-cublas-libonly.c
+   to test the fallback version.  */
+
+/* Check whether cuBlas' daxpy works with an interop object.
+     daxpy(N, DA, DX, INCX, DY, INCY)
+   calculates (for DX = DY = 1):
+     DY(1:N) =  DY(1:N) + DA * DX(1:N)
+   and otherwise N array elements, taking every INCX-th or INCY-th one, respectively.
+
+Based on the interop example in the OpenMP Examples document.  */
+
+/* Minimal check whether CUDA works - by checking whether the API routines
+   seem to work.  This includes a fallback if the header is not
+   available.  */
+
+#include <assert.h>
+#include <omp.h>
+#include "../libgomp.c-c++-common/on_device_arch.h"
+
+
+#if __has_include(<cuda.h>) && __has_include(<cudaTypedefs.h>) && __has_include(<cuda_runtime.h>) && __has_include(<cublas_v2.h>) && !defined(USE_CUDA_FALLBACK_HEADER)
+  #include <cuda.h>
+  #include <cudaTypedefs.h>
+  #include <cuda_runtime.h>
+  #include <cublas_v2.h>
+
+#else
+  /* Add a poor man's fallback declaration.  */
+  #if USE_CUDA_FALLBACK_HEADER
+    // Don't warn.
+  #elif !__has_include(<cuda.h>)
+    #warning "Using GCC's cuda.h as fallback for cuda.h"
+  #elif !__has_include(<cudaTypedefs.h>)
+    #warning "Using GCC's cuda.h as fallback for cudaTypedefs.h"
+  #elif !__has_include(<cuda_runtime.h>)
+    #warning "Using GCC's cuda.h as fallback for cuda_runtime.h"
+  #else
+    #warning "Using GCC's cuda.h as fallback for cublas_v2.h"
+  #endif
+  #include "../../../include/cuda/cuda.h"
+
+  typedef enum {
+    CUBLAS_STATUS_SUCCESS = 0,
+  } cublasStatus_t;
+
+  typedef CUstream cudaStream_t;
+  typedef struct cublasContext* cublasHandle_t;
+
+  #define cublasCreate cublasCreate_v2
+  cublasStatus_t cublasCreate_v2 (cublasHandle_t *);
+
+  #define cublasSetStream cublasSetStream_v2
+  cublasStatus_t cublasSetStream_v2 (cublasHandle_t, cudaStream_t);
+
+  #define cublasDaxpy cublasDaxpy_v2
+  cublasStatus_t cublasDaxpy_v2(cublasHandle_t, int, const double*, const double*, int, double*, int);
+#endif
+
+static int used_variant = 0;
+
+void
+run_cuBlasdaxpy (int n, double da, const double *dx, int incx, double *dy, int incy, omp_interop_t obj)
+{
+  used_variant = 1;
+
+  omp_interop_rc_t res;
+  cublasStatus_t stat;
+
+  omp_intptr_t fr = omp_get_interop_int(obj, omp_ipr_fr_id, &res);
+  assert (res == omp_irc_success && fr == omp_ifr_cuda);
+
+  cudaStream_t stream = (cudaStream_t) omp_get_interop_ptr (obj, omp_ipr_targetsync, &res);
+  assert (res == omp_irc_success);
+
+  cublasHandle_t handle;
+  stat = cublasCreate (&handle);
+  assert (stat == CUBLAS_STATUS_SUCCESS);
+
+  stat = cublasSetStream (handle, stream);
+  assert (stat == CUBLAS_STATUS_SUCCESS);
+
+  /* 'da' can be in host or device space, 'dx' and 'dy' must be in device space.  */
+  stat = cublasDaxpy (handle, n, &da, dx, 1, dy, 1);
+  assert (stat == CUBLAS_STATUS_SUCCESS);
+}
+
+
+#pragma omp declare variant(run_cuBlasdaxpy) \
+                       match(construct={dispatch}, target_device={kind(nohost), arch("nvptx")}) \
+                       adjust_args(need_device_ptr : dx, dy) \
+                       append_args(interop(targetsync, prefer_type("cuda")))
+
+void
+run_daxpy (int n, double da, const double *dx, int incx, double *dy, int incy)
+{
+  used_variant = 2;
+
+  if (incx == 1 && incy == 1)
+    #pragma omp simd
+    for (int i = 0; i < n; i++)
+      dy[i] += da * dx[i];
+  else
+    {
+      int ix = 0;
+      int iy = 0;
+      for (int i = 0; i < n; i++)
+       {
+         dy[iy] += da * dx[ix];
+         ix += incx;
+         iy += incy;
+       }
+    }
+}
+
+
+void
+run_test (int dev)
+{
+  constexpr int N = 1024;
+
+  // A = {0, 1, 2, ..., N-1}
+  // B = {0, -1, -2, ..., -(N-1)}
+  // B' = daxpy (N, 3, A, incx=1, B, incy=1)
+  //    = B + 3*A
+  // -> B' = {0, 2, 4, 6, ... }
+
+  double A[N], B[N];
+  double factor = 3.0;
+  for (int i = 0; i < N; i++)
+    {
+      A[i] = i;
+      B[i] = -i;
+    }
+
+  if (dev != omp_initial_device && dev != omp_get_num_devices ())
+    {
+      #pragma omp target enter data device(dev) map(A, B)
+    }
+
+  used_variant = 99;
+  #pragma omp dispatch device(dev)
+    run_daxpy (N, factor, A, 1, B, 1);  
+
+  if (dev != omp_initial_device && dev != omp_get_num_devices ())
+    {
+      #pragma omp target exit data device(dev) map(release: A) map(from: B)
+
+      int tmp = omp_get_default_device ();
+      omp_set_default_device (dev);
+      if (on_device_arch_nvptx ())
+       assert (used_variant == 1);
+      else
+       assert (used_variant == 2);
+      omp_set_default_device (tmp);
+    }
+  else
+    assert (used_variant == 2);
+
+  for (int i = 0; i < N; i++)
+    assert (B[i] == 2*i);
+}
+
+int   
+main () 
+{   
+  int ndev = omp_get_num_devices ();
+
+  for (int dev = 0; dev <= ndev; dev++)
+    run_test (dev);
+  run_test (omp_initial_device);  
+
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/interop-cublas-libonly.c b/libgomp/testsuite/libgomp.c/interop-cublas-libonly.c
new file mode 100644
index 000000000000..89c06524f555
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-cublas-libonly.c
@@ -0,0 +1,7 @@
+/* { dg-require-effective-target openacc_libcublas } */
+/* { dg-additional-options "-lcublas" } */
+
+/* Same as interop-cublas-full.c, but also works if the header is not available. */
+
+#define USE_CUDA_FALLBACK_HEADER 1
+#include "interop-cublas-full.c"
diff --git a/libgomp/testsuite/libgomp.c/interop-cuda-full.c b/libgomp/testsuite/libgomp.c/interop-cuda-full.c
new file mode 100644
index 000000000000..38aa6b130bb7
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-cuda-full.c
@@ -0,0 +1,159 @@
+/* { dg-require-effective-target openacc_cuda } */
+/* { dg-require-effective-target openacc_cudart } */
+/* { dg-additional-options "-lcuda -lcudart" } */
+
+/* NOTE: This file is also included by libgomp.c/interop-cuda-libonly.c
+   to test the fallback version, which defines USE_CUDA_FALLBACK_HEADER.  */
+
+/* Minimal check whether CUDA works - by checking whether the API routines
+   seem to work.  This includes a fallback if the header is not
+   available.  */
+
+#include <assert.h>
+#include <omp.h>
+
+#if __has_include(<cuda.h>) && __has_include(<cudaTypedefs.h>) && __has_include(<cuda_runtime.h>) && !defined(USE_CUDA_FALLBACK_HEADER)
+  #include <cuda.h>
+  #include <cudaTypedefs.h>
+  #include <cuda_runtime.h>
+
+#else
+  /* Add a poor man's fallback declaration.  */
+  #if USE_CUDA_FALLBACK_HEADER
+    // Don't warn.
+  #elif !__has_include(<cuda.h>)
+    #warning "Using GCC's cuda.h as fallback for cuda.h"
+  #elif !__has_include(<cudaTypedefs.h>)
+    #warning "Using GCC's cuda.h as fallback for cudaTypedefs.h"
+  #else
+    #warning "Using GCC's cuda.h as fallback for cuda_runtime.h"
+  #endif
+  #include "../../../include/cuda/cuda.h"
+
+  typedef int cudaError_t;
+  typedef CUstream cudaStream_t;
+  enum {
+    cudaSuccess = 0
+  };
+
+  enum cudaDeviceAttr {
+    cudaDevAttrClockRate = 13,
+    cudaDevAttrMaxGridDimX = 5
+  };
+
+  cudaError_t cudaDeviceGetAttribute (int *, enum cudaDeviceAttr, int);
+  cudaError_t cudaStreamQuery(cudaStream_t);
+  CUresult cuCtxGetApiVersion(CUcontext, unsigned int *);
+  CUresult cuStreamGetCtx (CUstream, CUcontext *);
+#endif
+
+int
+main ()
+{
+  int ivar;
+  unsigned uvar;
+  omp_interop_rc_t res;
+  omp_interop_t obj_cuda = omp_interop_none;
+  omp_interop_t obj_cuda_driver = omp_interop_none;
+  cudaError_t cuda_err;
+  CUresult cu_err;
+
+  #pragma omp interop init(target, targetsync, prefer_type("cuda") : obj_cuda) \
+                     init(target, targetsync, prefer_type("cuda_driver") : obj_cuda_driver) \
+
+  omp_interop_fr_t fr = (omp_interop_fr_t) omp_get_interop_int (obj_cuda, omp_ipr_fr_id, &res);
+  assert (res == omp_irc_success);
+  assert (fr == omp_ifr_cuda);
+
+  fr = (omp_interop_fr_t) omp_get_interop_int (obj_cuda_driver, omp_ipr_fr_id, &res);
+  assert (res == omp_irc_success);
+  assert (fr == omp_ifr_cuda_driver);
+
+  ivar = (int) omp_get_interop_int (obj_cuda, omp_ipr_vendor, &res);
+  assert (res == omp_irc_success);
+  assert (ivar == 11);
+
+  ivar = (int) omp_get_interop_int (obj_cuda_driver, omp_ipr_vendor, &res);
+  assert (res == omp_irc_success);
+  assert (ivar == 11);
+
+
+  /* Check whether the omp_ipr_device -> cudaDevice_t yields a valid device.  */
+
+  CUdevice cu_dev = (int) omp_get_interop_int (obj_cuda_driver, omp_ipr_device, &res);
+  assert (res == omp_irc_success);
+
+  /* Assume a clock rate is available and > 1 GHz; value is in kHz.  */
+  cu_err = cuDeviceGetAttribute (&ivar, cudaDevAttrClockRate, cu_dev);
+  assert (cu_err == CUDA_SUCCESS);
+  assert (ivar > 1000000 /* kHz */);
+
+  /* Assume that the MaxGridDimX is available and > 1024.  */
+  cu_err = cuDeviceGetAttribute (&ivar, cudaDevAttrMaxGridDimX, cu_dev);
+  assert (cu_err == CUDA_SUCCESS);
+  assert (ivar > 1024);
+
+  int cuda_dev = (int) omp_get_interop_int (obj_cuda, omp_ipr_device, &res);
+  assert (res == omp_irc_success);
+  assert (cuda_dev == (CUdevice) cu_dev); // Assume they are the same ...
+
+  /* Assume a clock rate is available and > 1 GHz; value is in kHz.  */
+  cuda_err = cudaDeviceGetAttribute (&ivar, cudaDevAttrClockRate, cuda_dev);
+  assert (cuda_err == cudaSuccess);
+  assert (ivar > 1000000 /* kHz */);
+
+  /* Assume that the MaxGridDimX is available and > 1024.  */
+  cuda_err = cudaDeviceGetAttribute (&ivar, cudaDevAttrMaxGridDimX, cuda_dev);
+  assert (cuda_err == cudaSuccess);
+  assert (ivar > 1024);
+
+
+
+
+  /* Check whether the omp_ipr_device_context -> CUcontext yields a context.  */
+
+  CUcontext cu_ctx = (CUcontext) omp_get_interop_ptr (obj_cuda_driver, omp_ipr_device_context, &res);
+  assert (res == omp_irc_success);
+
+  /* Assume API Version > 0 for Nvidia, cudaErrorNotSupported for AMD.  */
+  uvar = 99;
+  cu_err = cuCtxGetApiVersion (cu_ctx, &uvar);
+  assert (cu_err == CUDA_SUCCESS);
+  assert (uvar > 0);
+
+
+  /* Check whether the omp_ipr_targetsync -> cudaStream_t yields a stream.  */
+
+  cudaStream_t cuda_sm = (cudaStream_t) omp_get_interop_ptr (obj_cuda, omp_ipr_targetsync, &res);
+  assert (res == omp_irc_success);
+
+  CUstream cu_sm = (cudaStream_t) omp_get_interop_ptr (obj_cuda_driver, omp_ipr_targetsync, &res);
+  assert (res == omp_irc_success);
+
+  assert ((void*) cu_sm != (void*) cuda_sm); // Type compatible but should have created two streams
+
+  int dev_stream = 99;
+#if CUDA_VERSION >= 12080
+  cuda_err = cudaStreamGetDevice (cuda_sm, &dev_stream);
+  assert (cuda_err == cudaSuccess);
+#else
+  cu_err = cuStreamGetCtx (cu_sm, &cu_ctx) != CUDA_SUCCESS;
+  if (cu_err == CUDA_SUCCESS)
+    cu_err = cuCtxPushCurrent (cu_ctx) != CUDA_SUCCESS;
+  if (cu_err == CUDA_SUCCESS)
+    cu_err = cuCtxGetDevice (&dev_stream) != CUDA_SUCCESS;
+  if (cu_err == CUDA_SUCCESS)
+    cu_err = cuCtxPopCurrent (&cu_ctx) != CUDA_SUCCESS;
+  assert (cu_err == CUDA_SUCCESS);
+#endif
+  assert (dev_stream == cuda_dev);
+
+  /* All jobs should have been completed (as there were none).  */
+  cuda_err = cudaStreamQuery (cuda_sm);
+  assert (cuda_err == cudaSuccess);
+
+  cu_err = cuStreamQuery (cu_sm);
+  assert (cu_err == CUDA_SUCCESS);
+
+  #pragma omp interop destroy(obj_cuda, obj_cuda_driver)
+}
diff --git a/libgomp/testsuite/libgomp.c/interop-cuda-libonly.c b/libgomp/testsuite/libgomp.c/interop-cuda-libonly.c
new file mode 100644
index 000000000000..17cbb1591838
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-cuda-libonly.c
@@ -0,0 +1,8 @@
+/* { dg-require-effective-target openacc_libcudart } */
+/* { dg-require-effective-target openacc_libcuda } */
+/* { dg-additional-options "-lcuda -lcudart" } */
+
+/* Same as interop-cuda-full.c, but also works if the header is not available. */
+
+#define USE_CUDA_FALLBACK_HEADER 1
+#include "interop-cuda-full.c"
diff --git a/libgomp/testsuite/libgomp.c/interop-hip-amd-full.c b/libgomp/testsuite/libgomp.c/interop-hip-amd-full.c
new file mode 100644
index 000000000000..d7725fc8e349
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hip-amd-full.c
@@ -0,0 +1,7 @@
+/* { dg-require-effective-target gomp_hip_header_amd } */
+/* { dg-require-effective-target gomp_libamdhip64 } */
+/* { dg-additional-options "-lamdhip64" } */
+
+#define __HIP_PLATFORM_AMD__ 1
+
+#include "interop-hip.h"
diff --git a/libgomp/testsuite/libgomp.c/interop-hip-amd-no-hip-header.c b/libgomp/testsuite/libgomp.c/interop-hip-amd-no-hip-header.c
new file mode 100644
index 000000000000..25845379fcc1
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hip-amd-no-hip-header.c
@@ -0,0 +1,8 @@
+/* { dg-require-effective-target gomp_libamdhip64 } */
+/* { dg-additional-options "-lamdhip64" } */
+
+#define __HIP_PLATFORM_AMD__ 1
+
+#define USE_HIP_FALLBACK_HEADER 1
+
+#include "interop-hip.h"
diff --git a/libgomp/testsuite/libgomp.c/interop-hip-nvidia-full.c b/libgomp/testsuite/libgomp.c/interop-hip-nvidia-full.c
new file mode 100644
index 000000000000..324504feb228
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hip-nvidia-full.c
@@ -0,0 +1,8 @@
+/* { dg-require-effective-target openacc_cudart } */
+/* { dg-require-effective-target openacc_cuda } */
+/* { dg-require-effective-target gomp_hip_header_nvidia } */
+/* { dg-additional-options "-lcuda -lcudart" } */
+
+#define __HIP_PLATFORM_NVIDIA__ 1
+
+#include "interop-hip.h"
diff --git a/libgomp/testsuite/libgomp.c/interop-hip-nvidia-no-headers.c b/libgomp/testsuite/libgomp.c/interop-hip-nvidia-no-headers.c
new file mode 100644
index 000000000000..4586398ff00e
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hip-nvidia-no-headers.c
@@ -0,0 +1,10 @@
+/* { dg-require-effective-target openacc_libcudart } */
+/* { dg-require-effective-target openacc_libcuda } */
+/* { dg-additional-options "-lcuda -lcudart" } */
+
+#define __HIP_PLATFORM_NVIDIA__ 1
+
+#define USE_HIP_FALLBACK_HEADER 1
+#define USE_CUDA_FALLBACK_HEADER 1
+
+#include "interop-hip.h"
diff --git a/libgomp/testsuite/libgomp.c/interop-hip-nvidia-no-hip-header.c b/libgomp/testsuite/libgomp.c/interop-hip-nvidia-no-hip-header.c
new file mode 100644
index 000000000000..41869848fc5d
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hip-nvidia-no-hip-header.c
@@ -0,0 +1,9 @@
+/* { dg-require-effective-target openacc_cudart } */
+/* { dg-require-effective-target openacc_cuda } */
+/* { dg-additional-options "-lcuda -lcudart" } */
+
+#define __HIP_PLATFORM_NVIDIA__ 1
+
+#define USE_HIP_FALLBACK_HEADER 1
+
+#include "interop-hip.h"
diff --git a/libgomp/testsuite/libgomp.c/interop-hip.h b/libgomp/testsuite/libgomp.c/interop-hip.h
new file mode 100644
index 000000000000..20a1ccb78fb2
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hip.h
@@ -0,0 +1,234 @@
+/* Minimal check whether HIP works - by checking whether the API routines
+   seem to work.  This includes various fallbacks if the header is not
+   available.  */
+
+#include <assert.h>
+#include <omp.h>
+
+#if !defined(__HIP_PLATFORM_AMD__) && !defined(__HIP_PLATFORM_NVIDIA__)
+  #error "Either __HIP_PLATFORM_AMD__ or __HIP_PLATFORM_NVIDIA__ must be defined"
+#endif
+
+#if defined(__HIP_PLATFORM_AMD__) && defined(__HIP_PLATFORM_NVIDIA__)
+  #error "Only one of __HIP_PLATFORM_AMD__ and __HIP_PLATFORM_NVIDIA__ may be defined"
+#endif
+
+#if __has_include(<hip/hip_runtime_api.h>) && !defined(USE_HIP_FALLBACK_HEADER)
+  #include <hip/hip_runtime_api.h>
+
+#elif defined(__HIP_PLATFORM_AMD__)
+  /* Add a poor man's fallback declaration.  */
+  #if !defined(USE_HIP_FALLBACK_HEADER)
+    #warning "Using fallback declaration for <hip/hip_runtime_api.h> for __HIP_PLATFORM_AMD__"
+  #endif
+
+  typedef struct ihipStream_t* hipStream_t;
+  typedef struct ihipCtx_t* hipCtx_t;
+  typedef int hipError_t;
+  typedef int hipDevice_t;
+  enum {
+    hipSuccess = 0,
+    hipErrorNotSupported = 801
+  };
+
+  typedef enum hipDeviceAttribute_t {
+    hipDeviceAttributeClockRate = 5,
+    hipDeviceAttributeMaxGridDimX = 29
+  } hipDeviceAttribute_t;
+
+  hipError_t hipDeviceGetAttribute (int *, hipDeviceAttribute_t, hipDevice_t);
+  hipError_t hipCtxGetApiVersion (hipCtx_t, int *);
+  hipError_t hipStreamGetDevice (hipStream_t, hipDevice_t *);
+  hipError_t hipStreamQuery (hipStream_t);
+
+#elif defined(__HIP_PLATFORM_NVIDIA__)
+  /* Add a poor man's fallback declaration.  */
+  #if !defined(USE_HIP_FALLBACK_HEADER)
+    #warning "Using fallback declaration for <hip/hip_runtime_api.h> for __HIP_PLATFORM_NVIDIA__"
+  #endif
+
+  #if __has_include(<cuda.h>) && __has_include(<cudaTypedefs.h>) && __has_include(<cuda_runtime.h>) && !defined(USE_CUDA_FALLBACK_HEADER)
+    #include <cuda.h>
+    #include <cudaTypedefs.h>
+    #include <cuda_runtime.h>
+  #else
+    #if defined(USE_CUDA_FALLBACK_HEADER)
+       // no warning
+    #elif !__has_include(<cuda.h>)
+      #warning "Using GCC's cuda.h as fallback for cuda.h"
+    #elif !__has_include(<cudaTypedefs.h>)
+      #warning "Using GCC's cuda.h as fallback for cudaTypedefs.h"
+    #else
+      #warning "Using GCC's cuda.h as fallback for cuda_runtime.h"
+    #endif
+
+    #include "../../../include/cuda/cuda.h"
+
+    typedef int cudaError_t;
+    enum {
+      cudaSuccess = 0
+    };
+
+    enum cudaDeviceAttr {
+      cudaDevAttrClockRate = 13,
+      cudaDevAttrMaxGridDimX = 5
+    };
+
+    cudaError_t cudaDeviceGetAttribute (int *, enum cudaDeviceAttr, int);
+    CUresult cuCtxGetApiVersion(CUcontext, unsigned int *);
+    CUresult cuStreamGetCtx (CUstream, CUcontext *);
+  #endif
+
+  typedef CUstream hipStream_t;
+  typedef CUcontext hipCtx_t;
+  typedef CUdevice hipDevice_t;
+
+  typedef int hipError_t;
+  typedef int hipDevice_t;
+  enum {
+    hipSuccess = 0,
+    hipErrorNotSupported = 801
+  };
+
+
+  typedef enum hipDeviceAttribute_t {
+    hipDeviceAttributeClockRate = 5,
+    hipDeviceAttributeMaxGridDimX = 29
+  } hipDeviceAttribute_t;
+
+  inline static hipError_t
+  hipDeviceGetAttribute (int *ival, hipDeviceAttribute_t attr, hipDevice_t dev)
+  {
+    enum cudaDeviceAttr cuattr;
+    switch (attr)
+      {
+      case hipDeviceAttributeClockRate:
+       cuattr = cudaDevAttrClockRate;
+       break;
+      case hipDeviceAttributeMaxGridDimX:
+       cuattr = cudaDevAttrMaxGridDimX;
+       break;
+      default:
+       assert (0);
+      }
+    return cudaDeviceGetAttribute (ival, cuattr, dev) != cudaSuccess;
+  }
+
+  inline static hipError_t
+  hipCtxGetApiVersion (hipCtx_t ctx, int *ver)
+  {
+    unsigned uver;
+    hipError_t err;
+    err = cuCtxGetApiVersion (ctx, &uver) != CUDA_SUCCESS;
+    *ver = (int) uver;
+    return err;
+  }
+
+  inline static hipError_t
+  hipStreamGetDevice (hipStream_t stream, hipDevice_t *dev)
+  {
+#if CUDA_VERSION >= 12080
+    return cudaStreamGetDevice (stream, dev);
+#else
+    hipError_t err;
+    CUcontext ctx;
+    err = cuStreamGetCtx (stream, &ctx) != CUDA_SUCCESS;
+    if (err == hipSuccess)
+      err = cuCtxPushCurrent (ctx) != CUDA_SUCCESS;
+    if (err == hipSuccess)
+      err = cuCtxGetDevice (dev) != CUDA_SUCCESS;
+    if (err == hipSuccess)
+      err = cuCtxPopCurrent (&ctx) != CUDA_SUCCESS;
+    return err;
+#endif
+  }
+
+  inline static hipError_t
+  hipStreamQuery (hipStream_t stream)
+  {
+    return cuStreamQuery (stream) != CUDA_SUCCESS;
+  }
+
+#else
+  #error "should be unreachable"
+#endif
+
+int
+main ()
+{
+  int ivar;
+  omp_interop_rc_t res;
+  omp_interop_t obj = omp_interop_none;
+  hipError_t hip_err;
+
+  #pragma omp interop init(target, targetsync, prefer_type("hip") : obj)
+
+  omp_interop_fr_t fr = (omp_interop_fr_t) omp_get_interop_int (obj, omp_ipr_fr_id, &res);
+  assert (res == omp_irc_success);
+  assert (fr == omp_ifr_hip);
+
+  ivar = (int) omp_get_interop_int (obj, omp_ipr_vendor, &res);
+  assert (res == omp_irc_success);
+  int vendor_is_amd = ivar == 1;
+  #if defined(__HIP_PLATFORM_AMD__)
+    assert (ivar == 1);
+  #elif defined(__HIP_PLATFORM_NVIDIA__)
+    assert (ivar == 11);
+  #else
+    assert (0);
+  #endif
+
+
+  /* Check whether the omp_ipr_device -> hipDevice_t yields a valid device.  */
+
+  hipDevice_t hip_dev = (int) omp_get_interop_int (obj, omp_ipr_device, &res);
+  assert (res == omp_irc_success);
+
+  /* Assume a clock rate is available and > 1 GHz; value is in kHz.  */
+  hip_err = hipDeviceGetAttribute (&ivar, hipDeviceAttributeClockRate, hip_dev);
+  assert (hip_err == hipSuccess);
+  assert (ivar > 1000000 /* kHz */);
+
+  /* Assume that the MaxGridDimX is available and > 1024.  */
+  hip_err = hipDeviceGetAttribute (&ivar, hipDeviceAttributeMaxGridDimX, hip_dev);
+  assert (hip_err == hipSuccess);
+  assert (ivar > 1024);
+
+
+  /* Check whether the omp_ipr_device_context -> hipCtx_t yields a context.  */
+
+  hipCtx_t hip_ctx = (hipCtx_t) omp_get_interop_ptr (obj, omp_ipr_device_context, &res);
+  assert (res == omp_irc_success);
+
+  /* Assume API Version > 0 for Nvidia, hipErrorNotSupported for AMD.  */
+  ivar = -99;
+  #pragma GCC diagnostic push
+  #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+    hip_err = hipCtxGetApiVersion (hip_ctx, &ivar);
+  #pragma GCC diagnostic pop
+
+  if (vendor_is_amd)
+    assert (hip_err == hipErrorNotSupported && ivar == -99);
+  else
+    {
+      assert (hip_err == hipSuccess);
+      assert (ivar > 0);
+    }
+
+
+  /* Check whether the omp_ipr_targetsync -> hipStream_t yields a stream.  */
+
+  hipStream_t hip_sm = (hipStream_t) omp_get_interop_ptr (obj, omp_ipr_targetsync, &res);
+  assert (res == omp_irc_success);
+
+  hipDevice_t dev_stream = 99;
+  hip_err = hipStreamGetDevice (hip_sm, &dev_stream);
+  assert (hip_err == hipSuccess);
+  assert (dev_stream == hip_dev);
+
+  /* All jobs should have been completed (as there were none).  */
+  hip_err = hipStreamQuery (hip_sm);
+  assert (hip_err == hipSuccess);
+
+  #pragma omp interop destroy(obj)
+}
diff --git a/libgomp/testsuite/libgomp.c/interop-hipblas-amd-full.c b/libgomp/testsuite/libgomp.c/interop-hipblas-amd-full.c
new file mode 100644
index 000000000000..53c05bd82830
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hipblas-amd-full.c
@@ -0,0 +1,7 @@
+/* { dg-require-effective-target gomp_hip_header_amd } */
+/* { dg-require-effective-target gomp_libhipblas } */
+/* { dg-additional-options "-lhipblas" } */
+
+#define __HIP_PLATFORM_AMD__ 1
+
+#include "interop-hipblas.h"
diff --git a/libgomp/testsuite/libgomp.c/interop-hipblas-amd-no-hip-header.c b/libgomp/testsuite/libgomp.c/interop-hipblas-amd-no-hip-header.c
new file mode 100644
index 000000000000..0ea3133f8844
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hipblas-amd-no-hip-header.c
@@ -0,0 +1,8 @@
+/* { dg-require-effective-target gomp_libhipblas } */
+/* { dg-additional-options "-lhipblas" } */
+
+#define __HIP_PLATFORM_AMD__ 1
+
+#define USE_HIP_FALLBACK_HEADER 1
+
+#include "interop-hipblas.h"
diff --git a/libgomp/testsuite/libgomp.c/interop-hipblas-nvidia-full.c b/libgomp/testsuite/libgomp.c/interop-hipblas-nvidia-full.c
new file mode 100644
index 000000000000..c195d2486f69
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hipblas-nvidia-full.c
@@ -0,0 +1,7 @@
+/* { dg-require-effective-target openacc_cublas } */
+/* { dg-require-effective-target gomp_hip_header_nvidia } */
+/* { dg-additional-options "-lcublas" } */
+
+#define __HIP_PLATFORM_NVIDIA__ 1
+
+#include "interop-hipblas.h"
diff --git a/libgomp/testsuite/libgomp.c/interop-hipblas-nvidia-no-headers.c b/libgomp/testsuite/libgomp.c/interop-hipblas-nvidia-no-headers.c
new file mode 100644
index 000000000000..1a31b308848c
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hipblas-nvidia-no-headers.c
@@ -0,0 +1,9 @@
+/* { dg-require-effective-target openacc_libcublas } */
+/* { dg-additional-options "-lcublas" } */
+
+#define __HIP_PLATFORM_NVIDIA__ 1
+
+#define USE_HIP_FALLBACK_HEADER 1
+#define USE_CUDA_FALLBACK_HEADER 1
+
+#include "interop-hipblas.h"
diff --git a/libgomp/testsuite/libgomp.c/interop-hipblas-nvidia-no-hip-header.c b/libgomp/testsuite/libgomp.c/interop-hipblas-nvidia-no-hip-header.c
new file mode 100644
index 000000000000..f85c13be5ec4
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hipblas-nvidia-no-hip-header.c
@@ -0,0 +1,8 @@
+/* { dg-require-effective-target openacc_cublas } */
+/* { dg-additional-options "-lcublas" } */
+
+#define __HIP_PLATFORM_NVIDIA__ 1
+
+#define USE_HIP_FALLBACK_HEADER 1
+
+#include "interop-hipblas.h"
diff --git a/libgomp/testsuite/libgomp.c/interop-hipblas.h b/libgomp/testsuite/libgomp.c/interop-hipblas.h
new file mode 100644
index 000000000000..11cb4d280309
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/interop-hipblas.h
@@ -0,0 +1,228 @@
+/* Check whether hipBlas' daxpy works with an interop object.
+     daxpy(N, DA, DX, INCX, DY, INCY)
+   calculates (for DX = DY = 1):
+     DY(1:N) =  DY(1:N) + DA * DX(1:N)
+   and otherwise N array elements, taking every INCX-th or INCY-th one, respectively.
+
+Based on the interop example in the OpenMP Examples document.  */
+
+/* Minimal check whether HIP works - by checking whether the API routines
+   seem to work.  This includes a fallback if the header is not
+   available.  */
+
+#if !defined(__HIP_PLATFORM_AMD__) && !defined(__HIP_PLATFORM_NVIDIA__)
+  #error "Either __HIP_PLATFORM_AMD__ or __HIP_PLATFORM_NVIDIA__ must be defined"
+#endif
+
+#if defined(__HIP_PLATFORM_AMD__) && defined(__HIP_PLATFORM_NVIDIA__)
+  #error "Only one of __HIP_PLATFORM_AMD__ and __HIP_PLATFORM_NVIDIA__ may be defined"
+#endif
+
+
+#include <assert.h>
+#include <omp.h>
+#include "../libgomp.c-c++-common/on_device_arch.h"
+
+
+#if __has_include(<hipblas/hipblas.h>) && !defined(USE_HIP_FALLBACK_HEADER)
+  #include <hipblas/hipblas.h>
+
+#elif defined(__HIP_PLATFORM_AMD__)
+  /* Add a poor man's fallback declaration.  */
+  #if !defined(USE_HIP_FALLBACK_HEADER)
+    #warning "Using fallback declaration for <hipblas/hipblas.h> for __HIP_PLATFORM_AMD__"
+  #endif
+
+  typedef enum
+  {
+    HIPBLAS_STATUS_SUCCESS = 0
+
+  } hipblasStatus_t;
+
+  typedef struct ihipStream_t* hipStream_t;
+  typedef void* hipblasHandle_t;
+
+  hipblasStatus_t hipblasCreate (hipblasHandle_t*);
+  hipblasStatus_t hipblasSetStream (hipblasHandle_t, hipStream_t);
+  hipblasStatus_t hipblasDaxpy (hipblasHandle_t, int, const double*, const double*, int, double*, int);
+
+#else
+  /* Add a poor man's fallback declaration.  */
+  #if !defined(USE_HIP_FALLBACK_HEADER)
+    #warning "Using fallback declaration for <hipblas/hipblas.h> for __HIP_PLATFORM_NVIDIA__"
+  #endif
+
+  #if __has_include(<cuda.h>) && __has_include(<cudaTypedefs.h>) && __has_include(<cuda_runtime.h>) && __has_include(<cublas_v2.h>) && !defined(USE_CUDA_FALLBACK_HEADER)
+    #include <cuda.h>
+    #include <cudaTypedefs.h>
+    #include <cuda_runtime.h>
+    #include <cublas_v2.h>
+
+  #else
+    /* Add a poor man's fallback declaration.  */
+    #if defined(USE_CUDA_FALLBACK_HEADER)
+      // no warning
+    #elif !__has_include(<cuda.h>)
+      #warning "Using GCC's cuda.h as fallback for cuda.h"
+    #elif !__has_include(<cudaTypedefs.h>)
+      #warning "Using GCC's cuda.h as fallback for cudaTypedefs.h"
+    #elif !__has_include(<cuda_runtime.h>)
+      #warning "Using GCC's cuda.h as fallback for cuda_runtime.h"
+    #else
+      #warning "Using GCC's cuda.h as fallback for cublas_v2.h"
+    #endif
+    #include "../../../include/cuda/cuda.h"
+
+    typedef enum {
+      CUBLAS_STATUS_SUCCESS = 0,
+    } cublasStatus_t;
+
+    typedef CUstream cudaStream_t;
+    typedef struct cublasContext* cublasHandle_t;
+
+    #define cublasCreate cublasCreate_v2
+    cublasStatus_t cublasCreate_v2 (cublasHandle_t *);
+
+    #define cublasSetStream cublasSetStream_v2
+    cublasStatus_t cublasSetStream_v2 (cublasHandle_t, cudaStream_t);
+
+    #define cublasDaxpy cublasDaxpy_v2
+    cublasStatus_t cublasDaxpy_v2(cublasHandle_t, int, const double*, const double*, int, double*, int);
+  #endif
+
+  #define HIPBLAS_STATUS_SUCCESS CUBLAS_STATUS_SUCCESS
+  #define hipblasStatus_t cublasStatus_t
+  #define hipStream_t cudaStream_t
+  #define hipblasHandle_t cublasHandle_t
+  #define hipblasCreate cublasCreate
+  #define hipblasSetStream cublasSetStream
+  #define hipblasDaxpy cublasDaxpy
+#endif
+
+static int used_variant = 0;
+
+void
+run_hipBlasdaxpy (int n, double da, const double *dx, int incx, double *dy, int incy, omp_interop_t obj)
+{
+  used_variant = 1;
+
+  omp_interop_rc_t res;
+  hipblasStatus_t stat;
+
+  omp_intptr_t fr = omp_get_interop_int(obj, omp_ipr_fr_id, &res);
+  assert (res == omp_irc_success && fr == omp_ifr_hip);
+
+  hipStream_t stream = (hipStream_t) omp_get_interop_ptr (obj, omp_ipr_targetsync, &res);
+  assert (res == omp_irc_success);
+
+  hipblasHandle_t handle;
+  stat = hipblasCreate (&handle);
+  assert (stat == HIPBLAS_STATUS_SUCCESS);
+
+  stat = hipblasSetStream (handle, stream);
+  assert (stat == HIPBLAS_STATUS_SUCCESS);
+
+  /* 'da' can be in host or device space, 'dx' and 'dy' must be in device space.  */
+  stat = hipblasDaxpy (handle, n, &da, dx, 1, dy, 1);
+  assert (stat == HIPBLAS_STATUS_SUCCESS);
+}
+
+#if defined(__HIP_PLATFORM_AMD__)
+#pragma omp declare variant(run_hipBlasdaxpy) \
+                       match(construct={dispatch}, target_device={kind(nohost), arch("amdgcn")}) \
+                       adjust_args(need_device_ptr : dx, dy) \
+                       append_args(interop(targetsync, prefer_type("hip")))
+#elif defined(__HIP_PLATFORM_NVIDIA__) 
+#pragma omp declare variant(run_hipBlasdaxpy) \
+                       match(construct={dispatch}, target_device={kind(nohost), arch("nvptx")}) \
+                       adjust_args(need_device_ptr : dx, dy) \
+                       append_args(interop(targetsync, prefer_type("hip")))
+#else
+ #error "wrong platform"
+#endif
+
+void
+run_daxpy (int n, double da, const double *dx, int incx, double *dy, int incy)
+{
+  used_variant = 2;
+
+  if (incx == 1 && incy == 1)
+    #pragma omp simd
+    for (int i = 0; i < n; i++)
+      dy[i] += da * dx[i];
+  else
+    {
+      int ix = 0;
+      int iy = 0;
+      for (int i = 0; i < n; i++)
+       {
+         dy[iy] += da * dx[ix];
+         ix += incx;
+         iy += incy;
+       }
+    }
+}
+
+
+void
+run_test (int dev)
+{
+  constexpr int N = 1024;
+
+  // A = {0, 1, 2, ..., N-1}
+  // B = {0, -1, -2, ..., -(N-1)}
+  // B' = daxpy (N, 3, A, incx=1, B, incy=1)
+  //    = B + 3*A
+  // -> B' = {0, 2, 4, 6, ... }
+
+  double A[N], B[N];
+  double factor = 3.0;
+  for (int i = 0; i < N; i++)
+    {
+      A[i] = i;
+      B[i] = -i;
+    }
+
+  if (dev != omp_initial_device && dev != omp_get_num_devices ())
+    {
+      #pragma omp target enter data device(dev) map(A, B)
+    }
+
+  used_variant = 99;
+  #pragma omp dispatch device(dev)
+    run_daxpy (N, factor, A, 1, B, 1);  
+
+  if (dev != omp_initial_device && dev != omp_get_num_devices ())
+    {
+      #pragma omp target exit data device(dev) map(release: A) map(from: B)
+
+      int tmp = omp_get_default_device ();
+      omp_set_default_device (dev);
+#if defined(__HIP_PLATFORM_AMD__)
+      if (on_device_arch_gcn ())
+#else
+      if (on_device_arch_nvptx ())
+#endif
+       assert (used_variant == 1);
+      else
+       assert (used_variant == 2);
+      omp_set_default_device (tmp);
+    }
+  else
+    assert (used_variant == 2);
+
+  for (int i = 0; i < N; i++)
+    assert (B[i] == 2*i);
+}
+
+int   
+main () 
+{   
+  int ndev = omp_get_num_devices ();
+
+  for (int dev = 0; dev <= ndev; dev++)
+    run_test (dev);
+  run_test (omp_initial_device);  
+
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.fortran/interop-hip-amd-full.F90 b/libgomp/testsuite/libgomp.fortran/interop-hip-amd-full.F90
new file mode 100644
index 000000000000..bbd49ddd0102
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/interop-hip-amd-full.F90
@@ -0,0 +1,7 @@
+! { dg-require-effective-target gomp_hipfort_module }
+! { dg-require-effective-target gomp_libamdhip64 }
+! { dg-additional-options "-lamdhip64" }
+
+#define HAVE_HIPFORT 1
+
+#include "interop-hip.h"
diff --git a/libgomp/testsuite/libgomp.fortran/interop-hip-amd-no-module.F90 b/libgomp/testsuite/libgomp.fortran/interop-hip-amd-no-module.F90
new file mode 100644
index 000000000000..0afec8318413
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/interop-hip-amd-no-module.F90
@@ -0,0 +1,6 @@
+! { dg-require-effective-target gomp_libamdhip64 }
+! { dg-additional-options "-lamdhip64" }
+
+#define USE_HIP_FALLBACK_MODULE 1
+
+#include "interop-hip.h"
diff --git a/libgomp/testsuite/libgomp.fortran/interop-hip-nvidia-full.F90 b/libgomp/testsuite/libgomp.fortran/interop-hip-nvidia-full.F90
new file mode 100644
index 000000000000..cef592f49268
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/interop-hip-nvidia-full.F90
@@ -0,0 +1,9 @@
+! { dg-require-effective-target gomp_hipfort_module }
+! { dg-require-effective-target openacc_cudart }
+! { dg-require-effective-target openacc_cuda }
+! { dg-additional-options "-lcuda -lcudart" }
+
+#define HAVE_HIPFORT 1
+#define USE_CUDA_NAMES 1
+
+#include "interop-hip.h"
diff --git a/libgomp/testsuite/libgomp.fortran/interop-hip-nvidia-no-module.F90 b/libgomp/testsuite/libgomp.fortran/interop-hip-nvidia-no-module.F90
new file mode 100644
index 000000000000..c1ef29d70083
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/interop-hip-nvidia-no-module.F90
@@ -0,0 +1,8 @@
+! { dg-require-effective-target openacc_libcudart }
+! { dg-require-effective-target openacc_libcuda }
+! { dg-additional-options "-lcuda -lcudart" }
+
+#define USE_CUDA_NAMES 1
+#define USE_HIP_FALLBACK_MODULE 1
+
+#include "interop-hip.h"
diff --git a/libgomp/testsuite/libgomp.fortran/interop-hip.h b/libgomp/testsuite/libgomp.fortran/interop-hip.h
new file mode 100644
index 000000000000..753ccce5fd7d
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/interop-hip.h
@@ -0,0 +1,214 @@
+! Minimal check whether HIP works - by checking whether the API routines
+! seem to work.  This includes a fallback if hipfort is not available.
+
+#ifndef HAVE_HIPFORT
+#ifndef USE_HIP_FALLBACK_MODULE
+#if USE_CUDA_NAMES
+#warning "Using fallback implementation for module hipfort as HAVE_HIPFORT is undefined (for NVIDIA/CUDA)"
+#else
+#warning "Using fallback implementation for module hipfort as HAVE_HIPFORT is undefined - assume AMD as USE_CUDA_NAMES is unset"
+#endif
+#endif
+module hipfort  ! Minimal implementation for the testsuite
+  implicit none
+
+  enum, bind(c)
+    enumerator :: hipSuccess = 0
+    enumerator :: hipErrorNotSupported = 801
+  end enum
+
+  enum, bind(c)
+    enumerator :: hipDeviceAttributeClockRate = 5
+    enumerator :: hipDeviceAttributeMaxGridDimX = 29
+  end enum
+
+  interface
+    integer(kind(hipSuccess)) function hipDeviceGetAttribute (ip, attr, dev) &
+#if USE_CUDA_NAMES
+        bind(c, name="cudaDeviceGetAttribute")
+#else
+        bind(c, name="hipDeviceGetAttribute")
+#endif
+      use iso_c_binding, only: c_ptr, c_int
+      import
+      implicit none
+      type(c_ptr), value :: ip
+      integer(kind(hipDeviceAttributeClockRate)), value :: attr
+      integer(c_int), value :: dev
+    end
+
+    integer(kind(hipSuccess)) function hipCtxGetApiVersion (ctx, ip) &
+#if USE_CUDA_NAMES
+        bind(c, name="cudaCtxGetApiVersion")
+#else
+        bind(c, name="hipCtxGetApiVersion")
+#endif
+      use iso_c_binding, only: c_ptr
+      import
+      implicit none
+      type(c_ptr), value :: ctx, ip
+    end
+
+    integer(kind(hipSuccess)) function hipStreamQuery (stream) &
+#if USE_CUDA_NAMES
+        bind(c, name="cudaStreamQuery")
+#else
+        bind(c, name="hipStreamQuery")
+#endif
+      use iso_c_binding, only: c_ptr
+      import
+      implicit none
+      type(c_ptr), value :: stream
+    end
+
+   integer(kind(hipSuccess)) function hipStreamGetFlags (stream, flags) &
+#if USE_CUDA_NAMES
+        bind(c, name="cudaStreamGetFlags")
+#else
+        bind(c, name="hipStreamGetFlags")
+#endif
+      use iso_c_binding, only: c_ptr
+      import
+      implicit none
+      type(c_ptr), value :: stream
+      type(c_ptr), value :: flags
+    end
+  end interface
+end module
+#endif
+
+program main
+  use iso_c_binding, only: c_ptr, c_int, c_loc
+  use omp_lib
+  use hipfort
+  implicit none (type, external)
+
+! Only supported since CUDA 12.8 - skip for better compatibility
+!  ! Manually implement hipStreamGetDevice as hipfort misses it
+!  ! -> https://github.com/ROCm/hipfort/issues/238
+!  interface
+!    integer(kind(hipSuccess)) function my_hipStreamGetDevice(stream, dev) &
+!#if USE_CUDA_NAMES
+!        bind(c, name="cudaStreamGetDevice")
+!#else
+!        bind(c, name="hipStreamGetDevice")
+!#endif
+!      use iso_c_binding, only: c_ptr, c_int
+!      import
+!      implicit none
+!      type(c_ptr), value :: stream
+!      integer(c_int) :: dev
+!    end
+!  end interface
+
+  integer(c_int), target :: ivar
+  integer(omp_interop_rc_kind) :: res
+  integer(omp_interop_kind) :: obj
+  integer(omp_interop_fr_kind) :: fr
+  integer(kind(hipSuccess)) :: hip_err
+  integer(c_int) :: hip_dev, dev_stream
+  type(c_ptr) :: hip_ctx, hip_sm
+
+  logical :: vendor_is_amd
+
+  obj = omp_interop_none
+
+  !$omp interop init(target, targetsync, prefer_type("hip") : obj)
+
+  fr = omp_get_interop_int (obj, omp_ipr_fr_id, res)
+  if (res /= omp_irc_success) error stop 1
+  if (fr /= omp_ifr_hip) error stop 1
+
+  ivar = omp_get_interop_int (obj, omp_ipr_vendor, res)
+  if (ivar == 1) then  ! AMD
+    vendor_is_amd = .true.
+  else if (ivar == 11) then  ! Nvidia
+    vendor_is_amd = .false.
+  else
+    error stop 1  ! Unknown
+  endif
+#if USE_CUDA_NAMES
+  if (vendor_is_amd) error stop 1
+#else
+  if (.not. vendor_is_amd) error stop 1
+#endif
+
+  ! Check whether the omp_ipr_device -> hipDevice_t yields a valid device.
+
+  hip_dev = omp_get_interop_int (obj, omp_ipr_device, res)
+  if (res /= omp_irc_success) error stop 1
+
+! AMD messed up in Fortran with the attribute handling, missing the
+! translation table it has for C.
+block
+  enum, bind(c)
+    enumerator :: cudaDevAttrClockRate = 13
+    enumerator :: cudaDevAttrMaxGridDimX = 5
+  end enum
+
+  ! Assume a clock rate is available and > 1 GHz; value is in kHz.
+  ! c_loc is completely bogus, but as AMD messed up the interface ...
+  ! Cf. https://github.com/ROCm/hipfort/issues/239
+if (vendor_is_amd) then
+  hip_err = hipDeviceGetAttribute (c_loc(ivar), hipDeviceAttributeClockRate, hip_dev)
+else
+  hip_err = hipDeviceGetAttribute (c_loc(ivar), cudaDevAttrClockRate, hip_dev)
+endif
+  if (hip_err /= hipSuccess) error stop 1
+  if (ivar <= 1000000) error stop 1  ! in kHz
+
+  ! Assume that the MaxGridDimX is available and > 1024
+  ! c_loc is completely bogus, but as AMD messed up the interface ...
+  ! Cf. https://github.com/ROCm/hipfort/issues/239
+if (vendor_is_amd) then
+  hip_err = hipDeviceGetAttribute (c_loc(ivar), hipDeviceAttributeMaxGridDimX, hip_dev)
+else
+  hip_err = hipDeviceGetAttribute (c_loc(ivar), cudaDevAttrMaxGridDimX, hip_dev)
+endif
+  if (hip_err /= hipSuccess) error stop 1
+  if (ivar <= 1024) error stop 1
+end block
+
+
+  ! Check whether the omp_ipr_device_context -> hipCtx_t yields a context.
+
+  hip_ctx = omp_get_interop_ptr (obj, omp_ipr_device_context, res)
+  if (res /= omp_irc_success) error stop 1
+
+!  ! Assume API Version > 0 for Nvidia, hipErrorNotSupported for AMD.
+!  ivar = -99
+!  ! AMD deprecated hipCtxGetApiVersion (in C/C++)
+!  hip_err = hipCtxGetApiVersion (hip_ctx, c_loc(ivar))
+!
+!  if (vendor_is_amd) then
+!    if (hip_err /= hipErrorNotSupported .or. ivar /= -99) error stop 1
+!  else
+!    if (hip_err /= hipSuccess) error stop 1
+!    if (ivar <= 0) error stop 1
+!  end if
+
+
+  ! Check whether the omp_ipr_targetsync -> hipStream_t yields a stream.
+
+  hip_sm = omp_get_interop_ptr (obj, omp_ipr_targetsync, res)
+  if (res /= omp_irc_success) error stop 1
+
+! Skip as this is only in CUDA 12.8
+!  dev_stream = 99
+!    ! Not (yet) implemented: https://github.com/ROCm/hipfort/issues/238
+!    !  hip_err = hipStreamGetDevice (hip_sm, dev_stream)
+!  hip_err = my_hipStreamGetDevice (hip_sm, dev_stream)
+!  if (hip_err /= hipSuccess) error stop 1
+!  if (dev_stream /= hip_dev) error stop 1
+
+  ! Get flags of the stream
+  hip_err = hipStreamGetFlags (hip_sm, c_loc (ivar))
+  if (hip_err /= hipSuccess) error stop 1
+  ! Accept any value
+
+  ! All jobs should have been completed (as there were none)
+  hip_err = hipStreamQuery (hip_sm)
+  if (hip_err /= hipSuccess) error stop 1
+
+  !$omp interop destroy(obj)
+end
