================ @@ -0,0 +1,86 @@ +//===-- gpuintrin.h - Generic GPU intrinsic functions ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Provides wrappers around the clang builtins for accessing GPU hardware +// features. The interface is intended to be portable between architectures, but +// some targets may provide different implementations. This header can be +// included for all the common GPU programming languages, namely OpenMP, HIP, +// CUDA, and OpenCL. +// +//===----------------------------------------------------------------------===// + +#ifndef __GPUINTRIN_H +#define __GPUINTRIN_H + +#if defined(__NVPTX__) +#include <nvptxintrin.h> +#elif defined(__AMDGPU__) +#include <amdgpuintrin.h> +#else +#error "This header is only meant to be used on GPU architectures." +#endif + +// Returns the total number of blocks / workgroups. +_DEFAULT_ATTRS static inline uint64_t __gpu_num_blocks() { + return __gpu_num_blocks_x() * __gpu_num_blocks_y() * __gpu_num_blocks_z(); +} + +// Returns the absolute id of the block / workgroup. +_DEFAULT_ATTRS static inline uint64_t __gpu_block_id() { + return __gpu_block_id_x() + + (uint64_t)__gpu_num_blocks_x() * __gpu_block_id_y() + + (uint64_t)__gpu_num_blocks_x() * __gpu_num_blocks_y() * + __gpu_block_id_z(); +} + +// Returns the total number of threads in the block / workgroup. +_DEFAULT_ATTRS static inline uint32_t __gpu_num_threads() { + return __gpu_num_threads_x() * __gpu_num_threads_y() * __gpu_num_threads_z(); +} + +// Returns the absolute id of the thread in the current block / workgroup. 
+_DEFAULT_ATTRS static inline uint32_t __gpu_thread_id() { + return __gpu_thread_id_x() + __gpu_num_threads_x() * __gpu_thread_id_y() + + __gpu_num_threads_x() * __gpu_num_threads_y() * __gpu_thread_id_z(); +} + +// Get the first active thread inside the lane. +_DEFAULT_ATTRS static inline uint64_t +__gpu_first_lane_id(uint64_t __lane_mask) { + return __builtin_ffsll(__lane_mask) - 1; +} + +// Conditional that is only true for a single thread in a lane. +_DEFAULT_ATTRS static inline bool __gpu_is_first_lane(uint64_t __lane_mask) { + return __gpu_lane_id() == __gpu_first_lane_id(__lane_mask); +} + +// Gets the sum of all lanes inside the warp or wavefront. +_DEFAULT_ATTRS static inline uint32_t +__gpu_lane_reduce_u32(uint64_t __lane_mask, uint32_t x) { + for (uint32_t step = __gpu_num_lanes() / 2; step > 0; step /= 2) { + uint32_t index = step + __gpu_lane_id(); + x += __gpu_shuffle_idx_u32(__lane_mask, index, x); + } + return __gpu_broadcast_u32(__lane_mask, x); +} + +// Gets the accumulator scan of the threads in the warp or wavefront. +_DEFAULT_ATTRS static inline uint32_t __gpu_lane_scan_u32(uint64_t __lane_mask, + uint32_t x) { + for (uint32_t step = 1; step < __gpu_num_lanes(); step *= 2) { + uint32_t index = __gpu_lane_id() - step; + uint32_t bitmask = __gpu_lane_id() >= step; + x += -bitmask & __gpu_shuffle_idx_u32(__lane_mask, index, x); + } + return x; +} ---------------- jhuber6 wrote:
I'll need to look at how to do that on AMDGPU; I can maybe add it in a follow-up. https://github.com/llvm/llvm-project/pull/110179 _______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits