clang  6.0.0svn
__clang_cuda_intrinsics.h
/*===--- __clang_cuda_intrinsics.h - Device-side CUDA intrinsic wrappers ---===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __CLANG_CUDA_INTRINSICS_H__
#define __CLANG_CUDA_INTRINSICS_H__
#ifndef __CUDA__
#error "This file is for CUDA compilation only."
#endif

// sm_30 intrinsics: __shfl_{up,down,xor}.

#define __SM_30_INTRINSICS_H__
#define __SM_30_INTRINSICS_HPP__

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300

#pragma push_macro("__MAKE_SHUFFLES")
#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask) \
  inline __device__ int __FnName(int __val, int __offset, \
                                 int __width = warpSize) { \
    return __IntIntrinsic(__val, __offset, \
                          ((warpSize - __width) << 8) | (__Mask)); \
  } \
  inline __device__ float __FnName(float __val, int __offset, \
                                   int __width = warpSize) { \
    return __FloatIntrinsic(__val, __offset, \
                            ((warpSize - __width) << 8) | (__Mask)); \
  } \
  inline __device__ unsigned int __FnName(unsigned int __val, int __offset, \
                                          int __width = warpSize) { \
    return static_cast<unsigned int>( \
        ::__FnName(static_cast<int>(__val), __offset, __width)); \
  } \
  inline __device__ long long __FnName(long long __val, int __offset, \
                                       int __width = warpSize) { \
    struct __Bits { \
      int __a, __b; \
    }; \
    _Static_assert(sizeof(__val) == sizeof(__Bits)); \
    _Static_assert(sizeof(__Bits) == 2 * sizeof(int)); \
    __Bits __tmp; \
    /* Split the 64-bit value into two 32-bit halves and shuffle each. */ \
    memcpy(&__tmp, &__val, sizeof(__val)); \
    __tmp.__a = ::__FnName(__tmp.__a, __offset, __width); \
    __tmp.__b = ::__FnName(__tmp.__b, __offset, __width); \
    long long __ret; \
    memcpy(&__ret, &__tmp, sizeof(__tmp)); \
    return __ret; \
  } \
  inline __device__ unsigned long long __FnName( \
      unsigned long long __val, int __offset, int __width = warpSize) { \
    /* Cast to long long so the call resolves to the overload above. */ \
    return static_cast<unsigned long long>(::__FnName( \
        static_cast<long long>(__val), __offset, __width)); \
  } \
  inline __device__ double __FnName(double __val, int __offset, \
                                    int __width = warpSize) { \
    long long __tmp; \
    _Static_assert(sizeof(__tmp) == sizeof(__val)); \
    memcpy(&__tmp, &__val, sizeof(__val)); \
    __tmp = ::__FnName(__tmp, __offset, __width); \
    double __ret; \
    memcpy(&__ret, &__tmp, sizeof(__ret)); \
    return __ret; \
  }

__MAKE_SHUFFLES(__shfl, __nvvm_shfl_idx_i32, __nvvm_shfl_idx_f32, 0x1f);
// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
// maxLane.
__MAKE_SHUFFLES(__shfl_up, __nvvm_shfl_up_i32, __nvvm_shfl_up_f32, 0);
__MAKE_SHUFFLES(__shfl_down, __nvvm_shfl_down_i32, __nvvm_shfl_down_f32, 0x1f);
__MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f);

#pragma pop_macro("__MAKE_SHUFFLES")
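
// Illustrative usage (editor's sketch, not part of the original header): a
// warp-wide sum reduction built on the __shfl_down overload defined above.
// The helper name is hypothetical.
inline __device__ int __example_warp_reduce_sum(int __val) {
  // Each step adds in the value held by the lane __delta positions higher;
  // after log2(warpSize) steps, lane 0 holds the sum over the whole warp.
  for (int __delta = warpSize / 2; __delta > 0; __delta /= 2)
    __val += __shfl_down(__val, __delta);
  return __val;
}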

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300

#if CUDA_VERSION >= 9000
#if (!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300)
// __shfl_sync_* variants available in CUDA-9
#pragma push_macro("__MAKE_SYNC_SHUFFLES")
#define __MAKE_SYNC_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, \
                             __Mask) \
  inline __device__ int __FnName(unsigned int __mask, int __val, int __offset, \
                                 int __width = warpSize) { \
    return __IntIntrinsic(__mask, __val, __offset, \
                          ((warpSize - __width) << 8) | (__Mask)); \
  } \
  inline __device__ float __FnName(unsigned int __mask, float __val, \
                                   int __offset, int __width = warpSize) { \
    return __FloatIntrinsic(__mask, __val, __offset, \
                            ((warpSize - __width) << 8) | (__Mask)); \
  } \
  inline __device__ unsigned int __FnName(unsigned int __mask, \
                                          unsigned int __val, int __offset, \
                                          int __width = warpSize) { \
    return static_cast<unsigned int>( \
        ::__FnName(__mask, static_cast<int>(__val), __offset, __width)); \
  } \
  inline __device__ long long __FnName(unsigned int __mask, long long __val, \
                                       int __offset, int __width = warpSize) { \
    struct __Bits { \
      int __a, __b; \
    }; \
    _Static_assert(sizeof(__val) == sizeof(__Bits)); \
    _Static_assert(sizeof(__Bits) == 2 * sizeof(int)); \
    __Bits __tmp; \
    /* Split the 64-bit value into two 32-bit halves and shuffle each. */ \
    memcpy(&__tmp, &__val, sizeof(__val)); \
    __tmp.__a = ::__FnName(__mask, __tmp.__a, __offset, __width); \
    __tmp.__b = ::__FnName(__mask, __tmp.__b, __offset, __width); \
    long long __ret; \
    memcpy(&__ret, &__tmp, sizeof(__tmp)); \
    return __ret; \
  } \
  inline __device__ unsigned long long __FnName( \
      unsigned int __mask, unsigned long long __val, int __offset, \
      int __width = warpSize) { \
    /* Cast to long long so the call resolves to the overload above. */ \
    return static_cast<unsigned long long>(::__FnName( \
        __mask, static_cast<long long>(__val), __offset, __width)); \
  } \
  inline __device__ long __FnName(unsigned int __mask, long __val, \
                                  int __offset, int __width = warpSize) { \
    _Static_assert(sizeof(long) == sizeof(long long) || \
                   sizeof(long) == sizeof(int)); \
    if (sizeof(long) == sizeof(long long)) { \
      return static_cast<long>(::__FnName( \
          __mask, static_cast<long long>(__val), __offset, __width)); \
    } else if (sizeof(long) == sizeof(int)) { \
      return static_cast<long>( \
          ::__FnName(__mask, static_cast<int>(__val), __offset, __width)); \
    } \
  } \
  inline __device__ unsigned long __FnName(unsigned int __mask, \
                                           unsigned long __val, int __offset, \
                                           int __width = warpSize) { \
    return static_cast<unsigned long>( \
        ::__FnName(__mask, static_cast<long>(__val), __offset, __width)); \
  } \
  inline __device__ double __FnName(unsigned int __mask, double __val, \
                                    int __offset, int __width = warpSize) { \
    long long __tmp; \
    _Static_assert(sizeof(__tmp) == sizeof(__val)); \
    memcpy(&__tmp, &__val, sizeof(__val)); \
    __tmp = ::__FnName(__mask, __tmp, __offset, __width); \
    double __ret; \
    memcpy(&__ret, &__tmp, sizeof(__ret)); \
    return __ret; \
  }
__MAKE_SYNC_SHUFFLES(__shfl_sync, __nvvm_shfl_sync_idx_i32,
                     __nvvm_shfl_sync_idx_f32, 0x1f);
// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
// maxLane.
__MAKE_SYNC_SHUFFLES(__shfl_up_sync, __nvvm_shfl_sync_up_i32,
                     __nvvm_shfl_sync_up_f32, 0);
__MAKE_SYNC_SHUFFLES(__shfl_down_sync, __nvvm_shfl_sync_down_i32,
                     __nvvm_shfl_sync_down_f32, 0x1f);
__MAKE_SYNC_SHUFFLES(__shfl_xor_sync, __nvvm_shfl_sync_bfly_i32,
                     __nvvm_shfl_sync_bfly_f32, 0x1f);
#pragma pop_macro("__MAKE_SYNC_SHUFFLES")
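
// Illustrative usage (editor's sketch, not part of the original header): the
// same warp reduction written against the explicit-mask CUDA-9 variants;
// 0xffffffff names every lane of the warp. The helper name is hypothetical.
inline __device__ int __example_warp_reduce_sum_sync(int __val) {
  for (int __delta = warpSize / 2; __delta > 0; __delta /= 2)
    __val += __shfl_down_sync(0xffffffff, __val, __delta);
  return __val;
}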

inline __device__ void __syncwarp(unsigned int mask = 0xffffffff) {
  return __nvvm_bar_warp_sync(mask);
}

inline __device__ void __barrier_sync(unsigned int id) {
  __nvvm_barrier_sync(id);
}

inline __device__ void __barrier_sync_count(unsigned int id,
                                            unsigned int count) {
  __nvvm_barrier_sync_cnt(id, count);
}

inline __device__ int __all_sync(unsigned int mask, int pred) {
  return __nvvm_vote_all_sync(mask, pred);
}

inline __device__ int __any_sync(unsigned int mask, int pred) {
  return __nvvm_vote_any_sync(mask, pred);
}

inline __device__ int __uni_sync(unsigned int mask, int pred) {
  return __nvvm_vote_uni_sync(mask, pred);
}

inline __device__ unsigned int __ballot_sync(unsigned int mask, int pred) {
  return __nvvm_vote_ballot_sync(mask, pred);
}

inline __device__ unsigned int __activemask() { return __nvvm_vote_ballot(1); }

inline __device__ unsigned int __fns(unsigned mask, unsigned base, int offset) {
  return __nvvm_fns(mask, base, offset);
}
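
// Illustrative usage (editor's sketch, not part of the original header):
// __ballot_sync collects one predicate bit per lane, so counting the set bits
// with __popc (provided by the CUDA headers) gives the number of lanes for
// which the predicate held. The helper name is hypothetical.
inline __device__ int __example_warp_count_pred(int __pred) {
  unsigned int __votes = __ballot_sync(0xffffffff, __pred);
  return __popc(__votes);
}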

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300

// Define __match* builtins CUDA-9 headers expect to see.
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
inline __device__ unsigned int __match32_any_sync(unsigned int mask,
                                                  unsigned int value) {
  return __nvvm_match_any_sync_i32(mask, value);
}

inline __device__ unsigned long long
__match64_any_sync(unsigned int mask, unsigned long long value) {
  return __nvvm_match_any_sync_i64(mask, value);
}

inline __device__ unsigned int
__match32_all_sync(unsigned int mask, unsigned int value, int *pred) {
  return __nvvm_match_all_sync_i32p(mask, value, pred);
}

inline __device__ unsigned long long
__match64_all_sync(unsigned int mask, unsigned long long value, int *pred) {
  return __nvvm_match_all_sync_i64p(mask, value, pred);
}
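
// Illustrative usage (editor's sketch, not part of the original header):
// __match32_any_sync hands every lane a mask of the currently active lanes
// that passed the same 32-bit value, which is a convenient starting point
// for per-value leader election or aggregation. The helper name is
// hypothetical.
inline __device__ unsigned int __example_peer_mask(unsigned int __key) {
  return __match32_any_sync(__activemask(), __key);
}
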
#include "crt/sm_70_rt.hpp"

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
#endif // CUDA_VERSION >= 9000

// sm_32 intrinsics: __ldg and __funnelshift_{l,lc,r,rc}.

// Prevent the vanilla sm_32 intrinsics header from being included.
#define __SM_32_INTRINSICS_H__
#define __SM_32_INTRINSICS_HPP__

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320

inline __device__ char __ldg(const char *ptr) { return __nvvm_ldg_c(ptr); }
inline __device__ short __ldg(const short *ptr) { return __nvvm_ldg_s(ptr); }
inline __device__ int __ldg(const int *ptr) { return __nvvm_ldg_i(ptr); }
inline __device__ long __ldg(const long *ptr) { return __nvvm_ldg_l(ptr); }
inline __device__ long long __ldg(const long long *ptr) {
  return __nvvm_ldg_ll(ptr);
}
inline __device__ unsigned char __ldg(const unsigned char *ptr) {
  return __nvvm_ldg_uc(ptr);
}
inline __device__ unsigned short __ldg(const unsigned short *ptr) {
  return __nvvm_ldg_us(ptr);
}
inline __device__ unsigned int __ldg(const unsigned int *ptr) {
  return __nvvm_ldg_ui(ptr);
}
inline __device__ unsigned long __ldg(const unsigned long *ptr) {
  return __nvvm_ldg_ul(ptr);
}
inline __device__ unsigned long long __ldg(const unsigned long long *ptr) {
  return __nvvm_ldg_ull(ptr);
}
inline __device__ float __ldg(const float *ptr) { return __nvvm_ldg_f(ptr); }
inline __device__ double __ldg(const double *ptr) { return __nvvm_ldg_d(ptr); }

inline __device__ char2 __ldg(const char2 *ptr) {
  typedef char c2 __attribute__((ext_vector_type(2)));
  // We can assume that ptr is aligned at least to char2's alignment, but the
  // load will assume that ptr is aligned to c2's alignment. This is only
  // safe if alignof(c2) <= alignof(char2).
  c2 rv = __nvvm_ldg_c2(reinterpret_cast<const c2 *>(ptr));
  char2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ char4 __ldg(const char4 *ptr) {
  typedef char c4 __attribute__((ext_vector_type(4)));
  c4 rv = __nvvm_ldg_c4(reinterpret_cast<const c4 *>(ptr));
  char4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ short2 __ldg(const short2 *ptr) {
  typedef short s2 __attribute__((ext_vector_type(2)));
  s2 rv = __nvvm_ldg_s2(reinterpret_cast<const s2 *>(ptr));
  short2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ short4 __ldg(const short4 *ptr) {
  typedef short s4 __attribute__((ext_vector_type(4)));
  s4 rv = __nvvm_ldg_s4(reinterpret_cast<const s4 *>(ptr));
  short4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ int2 __ldg(const int2 *ptr) {
  typedef int i2 __attribute__((ext_vector_type(2)));
  i2 rv = __nvvm_ldg_i2(reinterpret_cast<const i2 *>(ptr));
  int2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ int4 __ldg(const int4 *ptr) {
  typedef int i4 __attribute__((ext_vector_type(4)));
  i4 rv = __nvvm_ldg_i4(reinterpret_cast<const i4 *>(ptr));
  int4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ longlong2 __ldg(const longlong2 *ptr) {
  typedef long long ll2 __attribute__((ext_vector_type(2)));
  ll2 rv = __nvvm_ldg_ll2(reinterpret_cast<const ll2 *>(ptr));
  longlong2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}

inline __device__ uchar2 __ldg(const uchar2 *ptr) {
  typedef unsigned char uc2 __attribute__((ext_vector_type(2)));
  uc2 rv = __nvvm_ldg_uc2(reinterpret_cast<const uc2 *>(ptr));
  uchar2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ uchar4 __ldg(const uchar4 *ptr) {
  typedef unsigned char uc4 __attribute__((ext_vector_type(4)));
  uc4 rv = __nvvm_ldg_uc4(reinterpret_cast<const uc4 *>(ptr));
  uchar4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ ushort2 __ldg(const ushort2 *ptr) {
  typedef unsigned short us2 __attribute__((ext_vector_type(2)));
  us2 rv = __nvvm_ldg_us2(reinterpret_cast<const us2 *>(ptr));
  ushort2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ ushort4 __ldg(const ushort4 *ptr) {
  typedef unsigned short us4 __attribute__((ext_vector_type(4)));
  us4 rv = __nvvm_ldg_us4(reinterpret_cast<const us4 *>(ptr));
  ushort4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ uint2 __ldg(const uint2 *ptr) {
  typedef unsigned int ui2 __attribute__((ext_vector_type(2)));
  ui2 rv = __nvvm_ldg_ui2(reinterpret_cast<const ui2 *>(ptr));
  uint2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ uint4 __ldg(const uint4 *ptr) {
  typedef unsigned int ui4 __attribute__((ext_vector_type(4)));
  ui4 rv = __nvvm_ldg_ui4(reinterpret_cast<const ui4 *>(ptr));
  uint4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ ulonglong2 __ldg(const ulonglong2 *ptr) {
  typedef unsigned long long ull2 __attribute__((ext_vector_type(2)));
  ull2 rv = __nvvm_ldg_ull2(reinterpret_cast<const ull2 *>(ptr));
  ulonglong2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}

inline __device__ float2 __ldg(const float2 *ptr) {
  typedef float f2 __attribute__((ext_vector_type(2)));
  f2 rv = __nvvm_ldg_f2(reinterpret_cast<const f2 *>(ptr));
  float2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
inline __device__ float4 __ldg(const float4 *ptr) {
  typedef float f4 __attribute__((ext_vector_type(4)));
  f4 rv = __nvvm_ldg_f4(reinterpret_cast<const f4 *>(ptr));
  float4 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  ret.z = rv[2];
  ret.w = rv[3];
  return ret;
}
inline __device__ double2 __ldg(const double2 *ptr) {
  typedef double d2 __attribute__((ext_vector_type(2)));
  d2 rv = __nvvm_ldg_d2(reinterpret_cast<const d2 *>(ptr));
  double2 ret;
  ret.x = rv[0];
  ret.y = rv[1];
  return ret;
}
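
// Illustrative usage (editor's sketch, not part of the original header):
// __ldg routes the load through the read-only data cache, so it is intended
// for memory that no thread writes during the kernel. The helper name and
// parameters are hypothetical.
inline __device__ float4 __example_ldg_scale(const float4 *__p, float __s) {
  float4 __v = __ldg(__p); // read-only cached load of a 16-byte vector
  __v.x *= __s;
  __v.y *= __s;
  __v.z *= __s;
  __v.w *= __s;
  return __v;
}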

// TODO: Implement these as intrinsics, so the backend can work its magic on
// these. Alternatively, we could implement these as plain C and try to get
// llvm to recognize the relevant patterns.
inline __device__ unsigned __funnelshift_l(unsigned low32, unsigned high32,
                                           unsigned shiftWidth) {
  unsigned result;
  asm("shf.l.wrap.b32 %0, %1, %2, %3;"
      : "=r"(result)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return result;
}
inline __device__ unsigned __funnelshift_lc(unsigned low32, unsigned high32,
                                            unsigned shiftWidth) {
  unsigned result;
  asm("shf.l.clamp.b32 %0, %1, %2, %3;"
      : "=r"(result)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return result;
}
inline __device__ unsigned __funnelshift_r(unsigned low32, unsigned high32,
                                           unsigned shiftWidth) {
  unsigned result;
  asm("shf.r.wrap.b32 %0, %1, %2, %3;"
      : "=r"(result)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return result;
}
inline __device__ unsigned __funnelshift_rc(unsigned low32, unsigned high32,
                                            unsigned shiftWidth) {
  unsigned ret;
  asm("shf.r.clamp.b32 %0, %1, %2, %3;"
      : "=r"(ret)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return ret;
}
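
// Illustrative usage (editor's sketch, not part of the original header):
// feeding the same word in as both halves turns the wrapping funnel shift
// into a 32-bit left rotate. The helper name is hypothetical.
inline __device__ unsigned __example_rotl32(unsigned __x, unsigned __n) {
  // shf.l.wrap shifts the 64-bit value (high32:low32) left by __n & 31 and
  // returns the high word; with both halves equal to __x this is a rotate.
  return __funnelshift_l(__x, __x, __n);
}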

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320

#endif // defined(__CLANG_CUDA_INTRINSICS_H__)