clang 22.0.0git
nvptxintrin.h
Go to the documentation of this file.
1//===-- nvptxintrin.h - NVPTX intrinsic functions -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef __NVPTXINTRIN_H
10#define __NVPTXINTRIN_H
11
12#ifndef __NVPTX__
13#error "This file is intended for NVPTX targets or offloading to NVPTX"
14#endif
15
16#ifndef __GPUINTRIN_H
17#error "Never use <nvptxintrin.h> directly; include <gpuintrin.h> instead"
18#endif
19
// Some host-side passes may parse this header without an architecture set;
// defaulting to 0 makes the '__CUDA_ARCH__ >= 700' checks below select the
// portable fallback paths.
#ifndef __CUDA_ARCH__
#define __CUDA_ARCH__ 0
#endif

// Restrict these definitions to NVPTX device compilation when building with
// OpenMP offloading.
_Pragma("omp begin declare target device_type(nohost)");
_Pragma("omp begin declare variant match(device = {arch(nvptx64)})");

// Type aliases to the address spaces used by the NVPTX backend.
#define __gpu_private __attribute__((address_space(5)))
#define __gpu_constant __attribute__((address_space(4)))
#define __gpu_local __attribute__((address_space(3)))
#define __gpu_global __attribute__((address_space(1)))
#define __gpu_generic __attribute__((address_space(0)))

// Attribute to declare a function as a kernel.
#define __gpu_kernel __attribute__((nvptx_kernel, visibility("protected")))
36
// Returns the number of CUDA blocks in the 'x' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_x(void) {
  // Reads the grid-dimension special register %nctaid.x.
  return __nvvm_read_ptx_sreg_nctaid_x();
}
41
// Returns the number of CUDA blocks in the 'y' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_y(void) {
  // Reads the grid-dimension special register %nctaid.y.
  return __nvvm_read_ptx_sreg_nctaid_y();
}
46
// Returns the number of CUDA blocks in the 'z' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_z(void) {
  // Reads the grid-dimension special register %nctaid.z.
  return __nvvm_read_ptx_sreg_nctaid_z();
}
51
// Returns the 'x' dimension of the current CUDA block's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_x(void) {
  // Reads the block-index special register %ctaid.x.
  return __nvvm_read_ptx_sreg_ctaid_x();
}
56
// Returns the 'y' dimension of the current CUDA block's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_y(void) {
  // Reads the block-index special register %ctaid.y.
  return __nvvm_read_ptx_sreg_ctaid_y();
}
61
// Returns the 'z' dimension of the current CUDA block's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_z(void) {
  // Reads the block-index special register %ctaid.z.
  return __nvvm_read_ptx_sreg_ctaid_z();
}
66
// Returns the number of CUDA threads in the 'x' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_x(void) {
  // Reads the block-dimension special register %ntid.x.
  return __nvvm_read_ptx_sreg_ntid_x();
}
71
// Returns the number of CUDA threads in the 'y' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_y(void) {
  // Reads the block-dimension special register %ntid.y.
  return __nvvm_read_ptx_sreg_ntid_y();
}
76
// Returns the number of CUDA threads in the 'z' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_z(void) {
  // Reads the block-dimension special register %ntid.z.
  return __nvvm_read_ptx_sreg_ntid_z();
}
81
// Returns the 'x' dimension id of the thread in the current CUDA block.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_x(void) {
  // Reads the thread-index special register %tid.x.
  return __nvvm_read_ptx_sreg_tid_x();
}
86
// Returns the 'y' dimension id of the thread in the current CUDA block.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_y(void) {
  // Reads the thread-index special register %tid.y.
  return __nvvm_read_ptx_sreg_tid_y();
}
91
// Returns the 'z' dimension id of the thread in the current CUDA block.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_z(void) {
  // Reads the thread-index special register %tid.z.
  return __nvvm_read_ptx_sreg_tid_z();
}
96
// Returns the size of a CUDA warp, always 32 on NVIDIA hardware.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_lanes(void) {
  // Reads the warp-size special register rather than hard-coding 32.
  return __nvvm_read_ptx_sreg_warpsize();
}
101
// Returns the id of the thread inside of a CUDA warp executing together.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_lane_id(void) {
  // Reads the lane-index special register %laneid (0..warpsize-1).
  return __nvvm_read_ptx_sreg_laneid();
}
106
// Returns the bit-mask of active threads in the current warp.
_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_lane_mask(void) {
  // activemask yields a 32-bit mask; it is zero-extended to the generic
  // 64-bit lane-mask type used by the <gpuintrin.h> interface.
  return __nvvm_activemask();
}
111
112// Copies the value from the first active thread in the warp to the rest.
113_DEFAULT_FN_ATTRS static __inline__ uint32_t
114__gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x) {
115 uint32_t __mask = (uint32_t)__lane_mask;
116 uint32_t __id = __builtin_ffs(__mask) - 1;
117 return __nvvm_shfl_sync_idx_i32(__mask, __x, __id, __gpu_num_lanes() - 1);
118}
119
120// Returns a bitmask of threads in the current lane for which \p x is true.
121_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_ballot(uint64_t __lane_mask,
122 bool __x) {
123 uint32_t __mask = (uint32_t)__lane_mask;
124 return __nvvm_vote_ballot_sync(__mask, __x);
125}
126
// Waits for all the threads in the block to converge and issues a fence.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_threads(void) {
  // CUDA's block-wide barrier; all threads of the block must reach it.
  __syncthreads();
}
131
// Waits for all threads in the warp to reconverge for independent scheduling.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_lane(uint64_t __lane_mask) {
  // bar.warp.sync takes a 32-bit membermask; the upper half of the generic
  // 64-bit lane mask is intentionally discarded on 32-lane warps.
  __nvvm_bar_warp_sync((uint32_t)__lane_mask);
}
136
// Shuffles the lanes inside the warp according to the given index.
// \p __width selects the segment size lanes are shuffled within.
_DEFAULT_FN_ATTRS static __inline__ uint32_t
__gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x,
                      uint32_t __width) {
  // Mask out inactive lanes to match AMDGPU behavior.
  uint32_t __mask = (uint32_t)__lane_mask;
  // True iff the source lane __idx is contained in the lane mask.
  bool __bitmask = (1ull << __idx) & __lane_mask;
  // '-__bitmask' is all-ones when the source lane is active and zero
  // otherwise, so reads from lanes outside the mask yield zero. The last
  // argument packs the shfl.sync 'c' operand: segment bits from
  // (warpsize - width) in the upper byte, max-lane clamp 0x1f in the low bits.
  return -__bitmask &
         __nvvm_shfl_sync_idx_i32(__mask, __x, __idx,
                                  ((__gpu_num_lanes() - __width) << 8u) | 0x1f);
}
148
// Returns a bitmask marking all lanes that have the same value of __x.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_match_any_u32(uint64_t __lane_mask, uint32_t __x) {
  // Newer targets can use the dedicated CUDA support.
#if __CUDA_ARCH__ >= 700
  // match.any.sync requires sm_70+. NOTE(review): the intrinsic takes a
  // 32-bit mask, so __lane_mask is implicitly truncated -- harmless for
  // 32-lane warps.
  return __nvvm_match_any_sync_i32(__lane_mask, __x);
#else
  // Fall back to the generic software implementation from <gpuintrin.h>.
  return __gpu_match_any_u32_impl(__lane_mask, __x);
#endif
}
159
// Returns a bitmask marking all lanes that have the same value of __x.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_match_any_u64(uint64_t __lane_mask, uint64_t __x) {
  // Newer targets can use the dedicated CUDA support.
#if __CUDA_ARCH__ >= 700
  // match.any.sync requires sm_70+; this variant compares 64-bit values.
  return __nvvm_match_any_sync_i64(__lane_mask, __x);
#else
  // Fall back to the generic software implementation from <gpuintrin.h>.
  return __gpu_match_any_u64_impl(__lane_mask, __x);
#endif
}
170
// Returns the current lane mask if every lane contains __x.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_match_all_u32(uint64_t __lane_mask, uint32_t __x) {
  // Newer targets can use the dedicated CUDA support.
#if __CUDA_ARCH__ >= 700
  // The predicate out-parameter is required by the intrinsic but unused
  // here; the returned mask alone encodes the all-equal result.
  int predicate;
  return __nvvm_match_all_sync_i32p(__lane_mask, __x, &predicate);
#else
  // Fall back to the generic software implementation from <gpuintrin.h>.
  return __gpu_match_all_u32_impl(__lane_mask, __x);
#endif
}
182
// Returns the current lane mask if every lane contains __x.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_match_all_u64(uint64_t __lane_mask, uint64_t __x) {
  // Newer targets can use the dedicated CUDA support.
#if __CUDA_ARCH__ >= 700
  // The predicate out-parameter is required by the intrinsic but unused
  // here; the returned mask alone encodes the all-equal result.
  int predicate;
  return __nvvm_match_all_sync_i64p(__lane_mask, __x, &predicate);
#else
  // Fall back to the generic software implementation from <gpuintrin.h>.
  return __gpu_match_all_u64_impl(__lane_mask, __x);
#endif
}
194
// Returns true if the flat pointer points to CUDA 'shared' memory.
// Note the naming crosswalk: the generic __gpu 'local' space maps to
// CUDA/PTX 'shared' memory, hence isspacep_shared here.
_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_local(void *ptr) {
  return __nvvm_isspacep_shared(ptr);
}
199
// Returns true if the flat pointer points to CUDA 'local' memory.
// Note the naming crosswalk: the generic __gpu 'private' space maps to
// CUDA/PTX 'local' (per-thread) memory, hence isspacep_local here.
_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_private(void *ptr) {
  return __nvvm_isspacep_local(ptr);
}
204
// Terminates execution of the calling thread. Does not return.
_DEFAULT_FN_ATTRS [[noreturn]] static __inline__ void __gpu_exit(void) {
  // Lowers to the PTX 'exit' instruction for the current thread only.
  __nvvm_exit();
}
209
// Suspend the thread briefly to assist the scheduler during busy loops.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_thread_suspend(void) {
  // nanosleep is only available on sm_70+; __nvvm_reflect resolves the
  // architecture check at compile/link time, so older targets simply no-op.
  if (__nvvm_reflect("__CUDA_ARCH") >= 700)
    asm("nanosleep.u32 64;" ::: "memory");
}
215
216_Pragma("omp end declare variant");
217_Pragma("omp end declare target");
218
219#endif // __NVPTXINTRIN_H
#define _DEFAULT_FN_ATTRS
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_any_u32_impl(uint64_t __lane_mask, uint32_t __x)
Definition gpuintrin.h:263
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_all_u64_impl(uint64_t __lane_mask, uint64_t __x)
Definition gpuintrin.h:312
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_all_u32_impl(uint64_t __lane_mask, uint32_t __x)
Definition gpuintrin.h:303
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_any_u64_impl(uint64_t __lane_mask, uint64_t __x)
Definition gpuintrin.h:283
static _DEFAULT_FN_ATTRS __inline__ void __gpu_thread_suspend(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_thread_id_x(void)
Definition nvptxintrin.h:83
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_lane_id(void)
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_any_u32(uint64_t __lane_mask, uint32_t __x)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_threads_z(void)
Definition nvptxintrin.h:78
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_lane_mask(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x, uint32_t __width)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_block_id_y(void)
Definition nvptxintrin.h:58
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_thread_id_z(void)
Definition nvptxintrin.h:93
static _DEFAULT_FN_ATTRS __inline__ void __gpu_sync_lane(uint64_t __lane_mask)
static _DEFAULT_FN_ATTRS __inline__ bool __gpu_is_ptr_private(void *ptr)
static _DEFAULT_FN_ATTRS __inline__ bool __gpu_is_ptr_local(void *ptr)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_blocks_x(void)
Definition nvptxintrin.h:38
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_block_id_z(void)
Definition nvptxintrin.h:63
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_all_u32(uint64_t __lane_mask, uint32_t __x)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_threads_y(void)
Definition nvptxintrin.h:73
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_threads_x(void)
Definition nvptxintrin.h:68
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_lanes(void)
Definition nvptxintrin.h:98
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_all_u64(uint64_t __lane_mask, uint64_t __x)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_thread_id_y(void)
Definition nvptxintrin.h:88
_Pragma("omp begin declare target device_type(nohost)")
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_blocks_z(void)
Definition nvptxintrin.h:48
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_block_id_x(void)
Definition nvptxintrin.h:53
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_match_any_u64(uint64_t __lane_mask, uint64_t __x)
static _DEFAULT_FN_ATTRS __inline__ uint64_t __gpu_ballot(uint64_t __lane_mask, bool __x)
static _DEFAULT_FN_ATTRS __inline__ void __gpu_exit(void)
static _DEFAULT_FN_ATTRS __inline__ void __gpu_sync_threads(void)
static _DEFAULT_FN_ATTRS __inline__ uint32_t __gpu_num_blocks_y(void)
Definition nvptxintrin.h:43
#define noreturn
Definition stdnoreturn.h:17