20#if !defined(_DEFAULT_FN_ATTRS)
21#if defined(__HIP__) || defined(__CUDA__)
22#define _DEFAULT_FN_ATTRS __attribute__((device))
24#define _DEFAULT_FN_ATTRS
30#if !defined(__cplusplus)
/* OpenMP offloading: open a device-only (nohost) declare-target region and a
   variant region selected only when the device kind is a GPU. Closed by the
   matching "omp end" pragmas near the end of the file. */
35_Pragma(
    "omp begin declare target device_type(nohost)");
36_Pragma(
    "omp begin declare variant match(device = {kind(gpu)})");
// Remaining target checks. NOTE(review): the per-target branch bodies are on
// lines not shown here — presumably the target-specific intrinsic includes;
// confirm against the full header.
61#elif defined(__AMDGPU__)
63#elif defined(__SPIRV__)
65#elif !defined(_OPENMP)
// Neither a recognized GPU target nor an OpenMP offload build: hard error.
66#error "This header is only meant to be used on GPU architectures."
// OpenMP fallback path: restrict the declarations that follow to GPU device
// compilation (device-only declare-target region plus a GPU-kind variant).
69_Pragma(
    "omp begin declare target device_type(nohost)");
70_Pragma(
    "omp begin declare variant match(device = {kind(gpu)})");
// Marks a function as a device kernel entry point and gives the symbol
// protected visibility.
73#define __gpu_kernel __attribute__((device_kernel, visibility("protected")))
// __builtin_ffsll is 1-based (returns 0 when no bit is set), so subtracting
// 1 yields the zero-based id of the lowest set lane (-1 for an empty mask).
138 return __builtin_ffsll(__lane_mask) - 1;
// Split the 64-bit operand into 32-bit halves; the surrounding code (not
// visible here) presumably applies the 32-bit broadcast to each half.
150 uint32_t __hi = (uint32_t)(__x >> 32ull);
151 uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFFull);
160 return __builtin_bit_cast(
162 __builtin_bit_cast(uint32_t, __x)));
168 return __builtin_bit_cast(
170 __builtin_bit_cast(uint64_t, __x)));
// Split the 64-bit operand into 32-bit halves and truncate the lane mask to
// 32 bits. NOTE(review): `0xFFFFFFFF` lacks the `ull` suffix used in the
// sibling split; the resulting value is identical, but the spelling is
// inconsistent.
177 uint32_t __hi = (uint32_t)(__x >> 32ull);
178 uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
179 uint32_t __mask = (uint32_t)__lane_mask;
189 return __builtin_bit_cast(
191 __builtin_bit_cast(uint32_t, __x), __width));
198 return __builtin_bit_cast(
201 __builtin_bit_cast(uint64_t, __x), __width));
// X-macro: instantiates the cross-lane scan/reduction helper family
// (__gpu_suffix_scan_*, __gpu_prefix_scan_*, __gpu_lane_*) for a given
// element type, binary operator, identity value, and name prefix/suffix.
210#define __DO_LANE_OP(__type, __op, __identity, __prefix, __suffix)          \
211 _DEFAULT_FN_ATTRS static __inline__ __type \
212 __gpu_suffix_scan_##__prefix##_##__suffix(uint64_t __lane_mask, \
214 uint64_t __above = __lane_mask & -(2ull << __gpu_lane_id()); \
215 for (uint32_t __step = 1; __step < __gpu_num_lanes(); __step *= 2) { \
216 uint32_t __src = __above ? __builtin_ctzg(__above) : __gpu_lane_id(); \
217 __type __result = __gpu_shuffle_idx_##__suffix(__lane_mask, __src, __x, \
218 __gpu_num_lanes()); \
219 __x = __x __op(__above ? __result : (__type)__identity); \
220 for (uint32_t __i = 0; __i < __step; ++__i) \
221 __above &= __above - 1; \
226 _DEFAULT_FN_ATTRS static __inline__ __type \
227 __gpu_prefix_scan_##__prefix##_##__suffix(uint64_t __lane_mask, \
229 uint64_t __below = __lane_mask & ((1ull << __gpu_lane_id()) - 1); \
230 for (uint32_t __step = 1; __step < __gpu_num_lanes(); __step *= 2) { \
232 __below ? (63 - __builtin_clzg(__below)) : __gpu_lane_id(); \
233 __type __result = __gpu_shuffle_idx_##__suffix(__lane_mask, __src, __x, \
234 __gpu_num_lanes()); \
235 __x = __x __op(__below ? __result : (__type)__identity); \
236 for (uint32_t __i = 0; __i < __step; ++__i) \
237 __below ^= (1ull << (63 - __builtin_clzg(__below, 0))) & __below; \
242 _DEFAULT_FN_ATTRS static __inline__ __type \
243 __gpu_lane_##__prefix##_##__suffix(uint64_t __lane_mask, __type __x) { \
244 return __gpu_read_first_lane_##__suffix( \
246 __gpu_suffix_scan_##__prefix##_##__suffix(__lane_mask, __x)); \
// Iterative match-any: accumulate the mask of active lanes whose value
// equals this lane's. NOTE(review): the loop body is partly on lines not
// visible here.
257 uint64_t __match_mask = 0;
260 for (uint64_t __active_mask = __lane_mask; __active_mask;
266 if (__first == __x) {
// 64-bit variant of the same match-any accumulation loop.
278 uint64_t __match_mask = 0;
281 for (uint64_t __active_mask = __lane_mask; __active_mask;
287 if (__first == __x) {
// match-all: the full lane mask when the ballot equals the mask (every
// active lane voted the same), otherwise 0.
302 return __ballot == __lane_mask ? __lane_mask : 0ull;
311 return __ballot == __lane_mask ? __lane_mask : 0ull;
/* Close the OpenMP GPU-variant and device-only declare-target regions opened
   earlier in the file. */
314_Pragma(
    "omp end declare variant");
315_Pragma(
    "omp end declare target");
// Cleanup. NOTE(review): the body of this #if (presumably the matching
// pop_macro for "bool") is on lines not shown; confirm in the full header.
317#if !defined(__cplusplus)
321#undef _DEFAULT_FN_ATTRS
__DEVICE__ unsigned int __ballot(int __a)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_thread_id_x(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_lane_id(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_threads_z(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x, uint32_t __width)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_block_id_y(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_thread_id_z(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_blocks_x(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_block_id_z(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_threads_y(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_threads_x(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_lanes(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_thread_id_y(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_blocks_z(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_block_id_x(void)
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_ballot(uint64_t __lane_mask, bool __x)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_blocks_y(void)
#define _DEFAULT_FN_ATTRS
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_block_id(int __dim)
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_read_first_lane_u64(uint64_t __lane_mask, uint64_t __x)
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_any_u32_impl(uint64_t __lane_mask, uint32_t __x)
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_shuffle_idx_u64(uint64_t __lane_mask, uint32_t __idx, uint64_t __x, uint32_t __width)
static _DEFAULT_FN_ATTRS __inline__ double __gpu_read_first_lane_f64(uint64_t __lane_mask, double __x)
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_all_u64_impl(uint64_t __lane_mask, uint64_t __x)
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_all_u32_impl(uint64_t __lane_mask, uint32_t __x)
static _DEFAULT_FN_ATTRS __inline__ bool __gpu_is_first_in_lane(uint64_t __lane_mask)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_threads(int __dim)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_thread_id(int __dim)
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_first_lane_id(uint64_t __lane_mask)
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_any_u64_impl(uint64_t __lane_mask, uint64_t __x)
static _DEFAULT_FN_ATTRS __inline__ float __gpu_shuffle_idx_f32(uint64_t __lane_mask, uint32_t __idx, float __x, uint32_t __width)
#define __DO_LANE_OP(__type, __op, __identity, __prefix, __suffix)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_blocks(int __dim)
static _DEFAULT_FN_ATTRS __inline__ float __gpu_read_first_lane_f32(uint64_t __lane_mask, float __x)
static _DEFAULT_FN_ATTRS __inline__ double __gpu_shuffle_idx_f64(uint64_t __lane_mask, uint32_t __idx, double __x, uint32_t __width)
_Pragma("push_macro(\"bool\")")