Author: mitchell
Date: 2026-01-19T08:33:43+08:00
New Revision: ff6531399d53f5a17c3315dc49adb2296398d964

URL: 
https://github.com/llvm/llvm-project/commit/ff6531399d53f5a17c3315dc49adb2296398d964
DIFF: 
https://github.com/llvm/llvm-project/commit/ff6531399d53f5a17c3315dc49adb2296398d964.diff

LOG: [clang-tidy] Add documentation and smoke test for CUDA (#173699)

As for AI usage: Gemini 3 was used to refine the wording and style of the
documentation text.

Closes #173677

Added: 
    clang-tools-extra/test/clang-tidy/infrastructure/Inputs/CUDA/cuda.h
    clang-tools-extra/test/clang-tidy/infrastructure/cuda-basic.cu

Modified: 
    clang-tools-extra/docs/clang-tidy/index.rst
    clang-tools-extra/test/clang-tidy/check_clang_tidy.py

Removed: 
    


################################################################################
diff  --git a/clang-tools-extra/docs/clang-tidy/index.rst 
b/clang-tools-extra/docs/clang-tidy/index.rst
index 34da529902308..d931ad0197b70 100644
--- a/clang-tools-extra/docs/clang-tidy/index.rst
+++ b/clang-tools-extra/docs/clang-tidy/index.rst
@@ -349,6 +349,39 @@ An overview of all the command-line options:
         some-check.SomeOption: 'some value'
       ...
 
+Running Clang-Tidy on CUDA Files
+--------------------------------
+
+:program:`clang-tidy` supports analyzing CUDA source files. To ensure correct
+header resolution, it is important to specify the CUDA toolkit path using
+``--cuda-path``. For more details on how Clang handles CUDA, see
+`Compiling CUDA with Clang <https://llvm.org/docs/CompileCudaWithLLVM.html>`_.
+
+If you are using a GCC + NVCC build setup, the compiler command database will
+contain NVCC-specific flags that :program:`clang-tidy` does not understand.
+
+In this case, you should use the ``RemovedArgs`` configuration option (or
+``--removed-arg`` command-line option) to remove these flags, and
+``ExtraArgs`` (or ``--extra-arg``) to provide the ``--cuda-path``.
+
+For example, to remove the NVCC-specific ``-gencode`` flag and provide the
+CUDA path:
+
+.. code-block:: console
+
+  $ clang-tidy source.cu --removed-arg="-gencode" --removed-arg="arch=.." 
--extra-arg="--cuda-path=/path/to/cuda"
+
+By default, :program:`clang-tidy` will analyze both host and device code.
+To restrict the analysis to a specific side and specifically choose device
+compilation flags, use the ``--extra-arg`` flag to pass the arguments.
+
+For example, to perform device analysis only, use
+the ``--cuda-device-only`` flag:
+
+.. code-block:: console
+
+  $ clang-tidy source.cu --extra-arg="--cuda-device-only" 
--extra-arg="--cuda-path=/path/to/cuda"
+
 Clang-Tidy Automation
 =====================
 

diff  --git a/clang-tools-extra/test/clang-tidy/check_clang_tidy.py 
b/clang-tools-extra/test/clang-tidy/check_clang_tidy.py
index b173ecf4fbdca..4e42f0b12516b 100755
--- a/clang-tools-extra/test/clang-tidy/check_clang_tidy.py
+++ b/clang-tools-extra/test/clang-tidy/check_clang_tidy.py
@@ -110,7 +110,7 @@ def __init__(self, args: argparse.Namespace, extra_args: 
List[str]) -> None:
 
         file_name_with_extension = self.assume_file_name or 
self.input_file_name
         _, extension = os.path.splitext(file_name_with_extension)
-        if extension not in [".c", ".hpp", ".m", ".mm"]:
+        if extension not in [".c", ".hpp", ".m", ".mm", ".cu"]:
             extension = ".cpp"
         self.temp_file_name = self.temp_file_name + extension
 

diff  --git 
a/clang-tools-extra/test/clang-tidy/infrastructure/Inputs/CUDA/cuda.h 
b/clang-tools-extra/test/clang-tidy/infrastructure/Inputs/CUDA/cuda.h
new file mode 100644
index 0000000000000..f8e2219da9cd8
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/Inputs/CUDA/cuda.h
@@ -0,0 +1,64 @@
+/* Minimal declarations for CUDA support.  Testing purposes only. */
+
+#include <stddef.h>
+
+// Make this file work with nvcc, for testing compatibility.
+
+#ifndef __NVCC__
+#define __constant__ __attribute__((constant))
+#define __device__ __attribute__((device))
+#define __global__ __attribute__((global))
+#define __host__ __attribute__((host))
+#define __shared__ __attribute__((shared))
+#define __managed__ __attribute__((managed))
+#define __grid_constant__ __attribute__((grid_constant))
+#define __launch_bounds__(...) __attribute__((launch_bounds(__VA_ARGS__)))
+#define __cluster_dims__(...) __attribute__((cluster_dims(__VA_ARGS__)))
+#define __no_cluster__ __attribute__((no_cluster))
+
+struct dim3 {
+  unsigned x, y, z;
+  __host__ __device__ dim3(unsigned x, unsigned y = 1, unsigned z = 1) : x(x), 
y(y), z(z) {}
+};
+
+#ifdef __HIP__
+typedef struct hipStream *hipStream_t;
+typedef enum hipError {} hipError_t;
+int hipConfigureCall(dim3 gridSize, dim3 blockSize, size_t sharedSize = 0,
+                     hipStream_t stream = 0); // NOLINT
+extern "C" hipError_t __hipPushCallConfiguration(dim3 gridSize, dim3 blockSize,
+                                                 size_t sharedSize = 0,
+                                                 hipStream_t stream = 0); // 
NOLINT
+extern "C" hipError_t hipLaunchKernel(const void *func, dim3 gridDim,
+                                      dim3 blockDim, void **args,
+                                      size_t sharedMem,
+                                      hipStream_t stream);
+#else
+typedef struct cudaStream *cudaStream_t;
+typedef enum cudaError {} cudaError_t;
+
+extern "C" int cudaConfigureCall(dim3 gridSize, dim3 blockSize,
+                                 size_t sharedSize = 0,
+                                 cudaStream_t stream = 0); // NOLINT
+extern "C" int __cudaPushCallConfiguration(dim3 gridSize, dim3 blockSize,
+                                           size_t sharedSize = 0,
+                                           cudaStream_t stream = 0); // NOLINT
+extern "C" cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim,
+                                        dim3 blockDim, void **args,
+                                        size_t sharedMem, cudaStream_t stream);
+extern "C" __device__ cudaError_t cudaLaunchDevice(void *func,
+                                                   void *parameterBuffer,
+                                                   dim3 gridDim, dim3 blockDim,
+                                                   unsigned int sharedMem,
+                                                   cudaStream_t stream);
+extern "C" __device__ void *cudaGetParameterBuffer(size_t alignment,
+                                                   size_t size);
+#endif
+
+// Host- and device-side placement new overloads.
+void *operator new(__SIZE_TYPE__, void *p) { return p; }
+void *operator new[](__SIZE_TYPE__, void *p) { return p; }
+__device__ void *operator new(__SIZE_TYPE__, void *p) { return p; }
+__device__ void *operator new[](__SIZE_TYPE__, void *p) { return p; }
+
+#endif // !__NVCC__

diff  --git a/clang-tools-extra/test/clang-tidy/infrastructure/cuda-basic.cu 
b/clang-tools-extra/test/clang-tidy/infrastructure/cuda-basic.cu
new file mode 100644
index 0000000000000..fda58da96db83
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/cuda-basic.cu
@@ -0,0 +1,13 @@
+// RUN: %check_clang_tidy %s modernize-use-nullptr %t -- -- \
+// RUN:   --cuda-path=%S/Inputs/CUDA \
+// RUN:   -nocudalib -nocudainc -I %S/Inputs/CUDA
+
+#include <cuda.h>
+
+__global__ void kernel(int *p) { p = 0; }
+// CHECK-MESSAGES: :[[@LINE-1]]:38: warning: use nullptr 
[modernize-use-nullptr]
+// CHECK-FIXES: __global__ void kernel(int *p) { p = nullptr; }
+
+void *p = 0;
+// CHECK-MESSAGES: :[[@LINE-1]]:11: warning: use nullptr 
[modernize-use-nullptr]
+// CHECK-FIXES: void *p = nullptr;


        
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to