#if !defined(_DEFAULT_FN_ATTRS)
#if defined(__HIP__) || defined(__CUDA__)
#define _DEFAULT_FN_ATTRS __attribute__((device))
#else
#define _DEFAULT_FN_ATTRS
#endif
#endif

#include <stdint.h>

#if !defined(__cplusplus)
_Pragma("push_macro(\"bool\")");
#define bool _Bool
#endif
_Pragma("omp begin declare target device_type(nohost)");
_Pragma("omp begin declare variant match(device = {kind(gpu)})");
#if defined(__NVPTX__)
#include <nvptxintrin.h>
#elif defined(__AMDGPU__)
#include <amdgpuintrin.h>
#elif defined(__SPIRV__)
#include <spirvintrin.h>
#elif !defined(_OPENMP)
#error "This header is only meant to be used on GPU architectures."
#endif
_Pragma("omp begin declare target device_type(nohost)");
_Pragma("omp begin declare variant match(device = {kind(gpu)})");
// Attribute for declaring a function as a GPU kernel entry point.
#define __gpu_kernel __attribute__((device_kernel, visibility("protected")))
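// Example (illustrative sketch, not part of the header): a kernel entry point
// declared with __gpu_kernel, using the portable grid helpers summarized at
// the end of this excerpt. The kernel name and arguments are hypothetical.
__gpu_kernel void __example_scale(uint32_t *__dst, uint32_t __n, uint32_t __s) {
  // Flat global index in dimension 0 (x).
  uint32_t __id =
      __gpu_num_threads(0) * __gpu_block_id(0) + __gpu_thread_id(0);
  if (__id < __n)
    __dst[__id] *= __s;
}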
  // Inside __gpu_first_lane_id: the index of the lowest set bit of the mask.
  return __builtin_ffsll(__lane_mask) - 1;
  // Inside __gpu_read_first_lane_u64: split into 32-bit halves, broadcast
  // each half with the 32-bit primitive, then recombine.
  uint32_t __hi = (uint32_t)(__x >> 32);
  uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);

  // Inside __gpu_read_first_lane_f32 / _f64: bit-cast through the integer forms.
  return __builtin_bit_cast(
      float, __gpu_read_first_lane_u32(__lane_mask,
                                       __builtin_bit_cast(uint32_t, __x)));
  return __builtin_bit_cast(
      double, __gpu_read_first_lane_u64(__lane_mask,
                                        __builtin_bit_cast(uint64_t, __x)));
  // Inside __gpu_shuffle_idx_u64: shuffle each 32-bit half separately.
  uint32_t __hi = (uint32_t)(__x >> 32);
  uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
  uint32_t __mask = (uint32_t)__lane_mask;
  // Inside __gpu_shuffle_idx_f32 / _f64: bit-cast through the integer forms.
  return __builtin_bit_cast(
      float, __gpu_shuffle_idx_u32(__lane_mask, __idx,
                                   __builtin_bit_cast(uint32_t, __x), __width));
  return __builtin_bit_cast(
      double, __gpu_shuffle_idx_u64(__lane_mask, __idx,
                                    __builtin_bit_cast(uint64_t, __x), __width));
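// Example (illustrative sketch, not part of the header): rotating a 64-bit
// value one lane upward with __gpu_shuffle_idx_u64. The helper name is
// hypothetical; __lane_mask is assumed to cover all participating lanes.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__example_rotate_u64(uint64_t __lane_mask, uint64_t __x) {
  // Each lane reads the value held by the next lane, wrapping around.
  uint32_t __src = (__gpu_lane_id() + 1) % __gpu_num_lanes();
  return __gpu_shuffle_idx_u64(__lane_mask, __src, __x, __gpu_num_lanes());
}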
// Generates lane-wide suffix/prefix scan and reduction helpers for the given
// type, binary operator, identity value, name prefix, and type suffix.
#define __DO_LANE_OPS(__type, __op, __identity, __prefix, __suffix)            \
  _DEFAULT_FN_ATTRS static __inline__ __type                                   \
      __gpu_suffix_scan_##__prefix##_##__suffix(uint64_t __lane_mask,          \
                                                __type __x) {                  \
    /* Active lanes strictly above this one. */                                \
    uint64_t __above = __lane_mask & -(UINT64_C(2) << __gpu_lane_id());        \
    for (uint32_t __step = 1; __step < __gpu_num_lanes(); __step *= 2) {       \
      uint32_t __src = __builtin_ctzg(__above, (int)sizeof(__above) * 8);      \
      __type __result = __gpu_shuffle_idx_##__suffix(__lane_mask, __src, __x,  \
                                                     __gpu_num_lanes());       \
      __x = __op(__x, __above ? __result : (__type)__identity);                \
      /* The fetched lane already covers the next __step active lanes. */      \
      for (uint32_t __i = 0; __i < __step; ++__i)                              \
        __above &= __above - 1;                                                \
    }                                                                          \
    return __x;                                                                \
  }                                                                            \
  _DEFAULT_FN_ATTRS static __inline__ __type                                   \
      __gpu_prefix_scan_##__prefix##_##__suffix(uint64_t __lane_mask,          \
                                                __type __x) {                  \
    /* Active lanes strictly below this one. */                                \
    uint64_t __below = __lane_mask & ((UINT64_C(1) << __gpu_lane_id()) - 1);   \
    for (uint32_t __step = 1; __step < __gpu_num_lanes(); __step *= 2) {       \
      uint32_t __src = 63 - __builtin_clzg(__below, (int)sizeof(__below) * 8); \
      __type __result = __gpu_shuffle_idx_##__suffix(__lane_mask, __src, __x,  \
                                                     __gpu_num_lanes());       \
      __x = __op(__x, __below ? __result : (__type)__identity);                \
      /* The fetched lane already covers the previous __step active lanes. */  \
      for (uint32_t __i = 0; __i < __step; ++__i)                              \
        __below ^=                                                             \
            (UINT64_C(1) << (63 - __builtin_clzg(__below, 0))) & __below;      \
    }                                                                          \
    return __x;                                                                \
  }                                                                            \
  _DEFAULT_FN_ATTRS static __inline__ __type                                   \
      __gpu_lane_##__prefix##_##__suffix(uint64_t __lane_mask, __type __x) {   \
    return __gpu_read_first_lane_##__suffix(                                   \
        __lane_mask,                                                           \
        __gpu_suffix_scan_##__prefix##_##__suffix(__lane_mask, __x));          \
  }
// Each operator below is paired in the full header with __DO_LANE_OPS
// instantiations for the integer and floating-point suffixes (elided here)
// before __GPU_OP is redefined for the next operator.
#define __GPU_OP(__x, __y) ((__x) + (__y))
#define __GPU_OP(__x, __y) ((__x) & (__y))
#define __GPU_OP(__x, __y) ((__x) | (__y))
#define __GPU_OP(__x, __y) ((__x) ^ (__y))
#define __GPU_OP(__x, __y) ((__x) < (__y) ? (__x) : (__y))
#define __GPU_OP(__x, __y) ((__x) > (__y) ? (__x) : (__y))
#define __GPU_OP(__x, __y) __builtin_elementwise_minnum((__x), (__y))
#define __GPU_OP(__x, __y) __builtin_elementwise_maxnum((__x), (__y))
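// Example (illustrative sketch, not part of the header): a sequential,
// host-side model of the result computed by the suffix scan above. Each set
// bit of __lane_mask stands for an active lane; an active lane combines its
// own value with the value of every active lane above it, which is what the
// __above mask and the bit-clearing loop achieve in log-steps on the GPU.
// The function and parameter names are hypothetical and the operator is
// fixed to addition.
static inline uint32_t __example_suffix_scan_model(uint64_t __lane_mask,
                                                   const uint32_t *__values,
                                                   uint32_t __lane) {
  uint32_t __acc = __values[__lane];
  // Active lanes strictly above this one, as in the macro.
  uint64_t __above = __lane_mask & -(UINT64_C(2) << __lane);
  while (__above) {
    uint32_t __src = (uint32_t)__builtin_ctzll(__above); // next lane above
    __acc += __values[__src];
    __above &= __above - 1; // clear the lowest remaining set bit
  }
  return __acc;
}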
  // Inside __gpu_match_any_u32_impl: lanes repeatedly ballot until each one
  // has recorded the set of active lanes whose value equals its own; __first
  // is the value broadcast from the first still-active lane.
  uint64_t __match_mask = 0;
  for (uint64_t __active_mask = __lane_mask; __active_mask;
  if (__first == __x) {

  // Inside __gpu_match_any_u64_impl: the same loop over 64-bit values.
  uint64_t __match_mask = 0;
  for (uint64_t __active_mask = __lane_mask; __active_mask;
  if (__first == __x) {

  // Inside __gpu_match_all_u32_impl / _u64_impl: the full lane mask is
  // returned only when every lane voted that its value matches the first's.
  return __ballot == __lane_mask ? __lane_mask : UINT64_C(0);
  return __ballot == __lane_mask ? __lane_mask : UINT64_C(0);
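// Example (illustrative sketch, not part of the header): a sequential,
// host-side model of what __gpu_match_any_u32_impl computes. For each active
// lane the result is the bitmask of every active lane holding the same value.
// The function and parameter names are hypothetical.
static inline uint64_t __example_match_any_model(uint64_t __lane_mask,
                                                 const uint32_t *__values,
                                                 uint32_t __lane) {
  uint64_t __match_mask = 0;
  for (uint32_t __i = 0; __i < 64; ++__i)
    if (((__lane_mask >> __i) & 1) && __values[__i] == __values[__lane])
      __match_mask |= UINT64_C(1) << __i;
  return __match_mask;
}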
_Pragma("omp end declare variant");
_Pragma("omp end declare target");
#if !defined(__cplusplus)
_Pragma("pop_macro(\"bool\")");
#endif

#undef _DEFAULT_FN_ATTRS
// Summary of the portable helpers declared by this header:
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_blocks_x(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_blocks_y(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_blocks_z(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_blocks(int __dim);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_block_id_x(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_block_id_y(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_block_id_z(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_block_id(int __dim);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_threads_x(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_threads_y(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_threads_z(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_threads(int __dim);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_thread_id_x(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_thread_id_y(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_thread_id_z(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_thread_id(int __dim);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_lanes(void);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_lane_id(void);
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_ballot(uint64_t __lane_mask, bool __x);
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_first_lane_id(uint64_t __lane_mask);
static _DEFAULT_FN_ATTRS __inline__ bool __gpu_is_first_in_lane(uint64_t __lane_mask);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x);
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_read_first_lane_u64(uint64_t __lane_mask, uint64_t __x);
static _DEFAULT_FN_ATTRS __inline__ float __gpu_read_first_lane_f32(uint64_t __lane_mask, float __x);
static _DEFAULT_FN_ATTRS __inline__ double __gpu_read_first_lane_f64(uint64_t __lane_mask, double __x);
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x, uint32_t __width);
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_shuffle_idx_u64(uint64_t __lane_mask, uint32_t __idx, uint64_t __x, uint32_t __width);
static _DEFAULT_FN_ATTRS __inline__ float __gpu_shuffle_idx_f32(uint64_t __lane_mask, uint32_t __idx, float __x, uint32_t __width);
static _DEFAULT_FN_ATTRS __inline__ double __gpu_shuffle_idx_f64(uint64_t __lane_mask, uint32_t __idx, double __x, uint32_t __width);
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_any_u32_impl(uint64_t __lane_mask, uint32_t __x);
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_any_u64_impl(uint64_t __lane_mask, uint64_t __x);
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_all_u32_impl(uint64_t __lane_mask, uint32_t __x);
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_all_u64_impl(uint64_t __lane_mask, uint64_t __x);