#ifndef __CLANG_CUDA_INTRINSICS_H__
#define __CLANG_CUDA_INTRINSICS_H__

#ifndef __CUDA__
#error "This file is for CUDA compilation only."
#endif

// sm_30 intrinsics: __shfl_{up,down,xor}.

#define __SM_30_INTRINSICS_H__
#define __SM_30_INTRINSICS_HPP__

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
#pragma push_macro("__MAKE_SHUFFLES")
#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask,   \
                        __Type)                                               \
  inline __device__ int __FnName(int __val, __Type __offset,                 \
                                 int __width = warpSize) {                   \
    return __IntIntrinsic(__val, __offset,                                   \
                          ((warpSize - __width) << 8) | (__Mask));           \
  }                                                                           \
  inline __device__ float __FnName(float __val, __Type __offset,             \
                                   int __width = warpSize) {                 \
    return __FloatIntrinsic(__val, __offset,                                 \
                            ((warpSize - __width) << 8) | (__Mask));         \
  }                                                                           \
  inline __device__ unsigned int __FnName(unsigned int __val,                \
                                          __Type __offset,                   \
                                          int __width = warpSize) {          \
    return static_cast<unsigned int>(                                        \
        ::__FnName(static_cast<int>(__val), __offset, __width));             \
  }                                                                           \
  inline __device__ long long __FnName(long long __val, __Type __offset,     \
                                       int __width = warpSize) {             \
    struct __Bits {                                                          \
      int __a, __b;                                                          \
    };                                                                       \
    _Static_assert(sizeof(__val) == sizeof(__Bits));                         \
    _Static_assert(sizeof(__Bits) == 2 * sizeof(int));                       \
    __Bits __tmp;                                                            \
    memcpy(&__tmp, &__val, sizeof(__val));                                   \
    __tmp.__a = ::__FnName(__tmp.__a, __offset, __width);                    \
    __tmp.__b = ::__FnName(__tmp.__b, __offset, __width);                    \
    long long __ret;                                                         \
    memcpy(&__ret, &__tmp, sizeof(__tmp));                                   \
    return __ret;                                                            \
  }                                                                           \
  inline __device__ long __FnName(long __val, __Type __offset,               \
                                  int __width = warpSize) {                  \
    _Static_assert(sizeof(long) == sizeof(long long) ||                      \
                   sizeof(long) == sizeof(int));                             \
    if (sizeof(long) == sizeof(long long)) {                                 \
      return static_cast<long>(                                              \
          ::__FnName(static_cast<long long>(__val), __offset, __width));     \
    } else if (sizeof(long) == sizeof(int)) {                                \
      return static_cast<long>(                                              \
          ::__FnName(static_cast<int>(__val), __offset, __width));           \
    }                                                                        \
  }                                                                           \
  inline __device__ unsigned long __FnName(                                  \
      unsigned long __val, __Type __offset, int __width = warpSize) {        \
    return static_cast<unsigned long>(                                       \
        ::__FnName(static_cast<long>(__val), __offset, __width));            \
  }                                                                           \
  inline __device__ unsigned long long __FnName(                             \
      unsigned long long __val, __Type __offset, int __width = warpSize) {   \
    return static_cast<unsigned long long>(                                  \
        ::__FnName(static_cast<long long>(__val), __offset, __width));       \
  }                                                                           \
  inline __device__ double __FnName(double __val, __Type __offset,           \
                                    int __width = warpSize) {                \
    long long __tmp;                                                         \
    _Static_assert(sizeof(__tmp) == sizeof(__val));                          \
    memcpy(&__tmp, &__val, sizeof(__val));                                   \
    __tmp = ::__FnName(__tmp, __offset, __width);                            \
    double __ret;                                                            \
    memcpy(&__ret, &__tmp, sizeof(__ret));                                   \
    return __ret;                                                            \
  }

__MAKE_SHUFFLES(__shfl, __nvvm_shfl_idx_i32, __nvvm_shfl_idx_f32, 0x1f, int);
// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
// maxLane.
__MAKE_SHUFFLES(__shfl_up, __nvvm_shfl_up_i32, __nvvm_shfl_up_f32, 0,
                unsigned int);
__MAKE_SHUFFLES(__shfl_down, __nvvm_shfl_down_i32, __nvvm_shfl_down_f32, 0x1f,
                unsigned int);
__MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f,
                int);
#pragma pop_macro("__MAKE_SHUFFLES")

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
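// Usage sketch (illustrative only, not part of the original header): a
// butterfly reduction across a full warp with the legacy __shfl_xor defined
// above. Assumes all 32 lanes are active; on CUDA >= 9.0 the *_sync variants
// defined below should be preferred.
//
//   __device__ int warp_xor_reduce(int __v) {
//     for (int __lane_mask = warpSize / 2; __lane_mask > 0; __lane_mask /= 2)
//       __v += __shfl_xor(__v, __lane_mask);
//     return __v;  // every lane ends up with the warp-wide sum
//   }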
#if CUDA_VERSION >= 9000
#if (!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300)
// __shfl_sync_* variants available in CUDA-9.
#pragma push_macro("__MAKE_SYNC_SHUFFLES")
#define __MAKE_SYNC_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic,      \
                             __Mask, __Type)                                  \
  inline __device__ int __FnName(unsigned int __mask, int __val,             \
                                 __Type __offset, int __width = warpSize) {  \
    return __IntIntrinsic(__mask, __val, __offset,                           \
                          ((warpSize - __width) << 8) | (__Mask));           \
  }                                                                           \
  inline __device__ float __FnName(unsigned int __mask, float __val,         \
                                   __Type __offset, int __width = warpSize) {\
    return __FloatIntrinsic(__mask, __val, __offset,                         \
                            ((warpSize - __width) << 8) | (__Mask));         \
  }                                                                           \
  inline __device__ unsigned int __FnName(unsigned int __mask,               \
                                          unsigned int __val,                \
                                          __Type __offset,                   \
                                          int __width = warpSize) {          \
    return static_cast<unsigned int>(                                        \
        ::__FnName(__mask, static_cast<int>(__val), __offset, __width));     \
  }                                                                           \
  inline __device__ long long __FnName(unsigned int __mask, long long __val, \
                                       __Type __offset,                      \
                                       int __width = warpSize) {             \
    struct __Bits {                                                          \
      int __a, __b;                                                          \
    };                                                                       \
    _Static_assert(sizeof(__val) == sizeof(__Bits));                         \
    _Static_assert(sizeof(__Bits) == 2 * sizeof(int));                       \
    __Bits __tmp;                                                            \
    memcpy(&__tmp, &__val, sizeof(__val));                                   \
    __tmp.__a = ::__FnName(__mask, __tmp.__a, __offset, __width);            \
    __tmp.__b = ::__FnName(__mask, __tmp.__b, __offset, __width);            \
    long long __ret;                                                         \
    memcpy(&__ret, &__tmp, sizeof(__tmp));                                   \
    return __ret;                                                            \
  }                                                                           \
  inline __device__ unsigned long long __FnName(                             \
      unsigned int __mask, unsigned long long __val, __Type __offset,        \
      int __width = warpSize) {                                              \
    return static_cast<unsigned long long>(                                  \
        ::__FnName(__mask, static_cast<long long>(__val), __offset,          \
                   __width));                                                \
  }                                                                           \
  inline __device__ long __FnName(unsigned int __mask, long __val,           \
                                  __Type __offset, int __width = warpSize) { \
    _Static_assert(sizeof(long) == sizeof(long long) ||                      \
                   sizeof(long) == sizeof(int));                             \
    if (sizeof(long) == sizeof(long long)) {                                 \
      return static_cast<long>(::__FnName(                                   \
          __mask, static_cast<long long>(__val), __offset, __width));        \
    } else if (sizeof(long) == sizeof(int)) {                                \
      return static_cast<long>(                                              \
          ::__FnName(__mask, static_cast<int>(__val), __offset, __width));   \
    }                                                                        \
  }                                                                           \
  inline __device__ unsigned long __FnName(                                  \
      unsigned int __mask, unsigned long __val, __Type __offset,             \
      int __width = warpSize) {                                              \
    return static_cast<unsigned long>(                                       \
        ::__FnName(__mask, static_cast<long>(__val), __offset, __width));    \
  }                                                                           \
  inline __device__ double __FnName(unsigned int __mask, double __val,       \
                                    __Type __offset,                         \
                                    int __width = warpSize) {                \
    long long __tmp;                                                         \
    _Static_assert(sizeof(__tmp) == sizeof(__val));                          \
    memcpy(&__tmp, &__val, sizeof(__val));                                   \
    __tmp = ::__FnName(__mask, __tmp, __offset, __width);                    \
    double __ret;                                                            \
    memcpy(&__ret, &__tmp, sizeof(__ret));                                   \
    return __ret;                                                            \
  }

__MAKE_SYNC_SHUFFLES(__shfl_sync, __nvvm_shfl_sync_idx_i32,
                     __nvvm_shfl_sync_idx_f32, 0x1f, int);
__MAKE_SYNC_SHUFFLES(__shfl_up_sync, __nvvm_shfl_sync_up_i32,
                     __nvvm_shfl_sync_up_f32, 0, unsigned int);
__MAKE_SYNC_SHUFFLES(__shfl_down_sync, __nvvm_shfl_sync_down_i32,
                     __nvvm_shfl_sync_down_f32, 0x1f, unsigned int);
__MAKE_SYNC_SHUFFLES(__shfl_xor_sync, __nvvm_shfl_sync_bfly_i32,
                     __nvvm_shfl_sync_bfly_f32, 0x1f, int);
#pragma pop_macro("__MAKE_SYNC_SHUFFLES")
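// Usage sketch (illustrative only, not part of the original header): a
// warp-wide sum using __shfl_down_sync. Assumes the caller passes the mask of
// lanes known to be converged (0xffffffff for a full warp); lane 0 receives
// the total.
//
//   __device__ int warp_reduce_sum(unsigned __mask, int __v) {
//     for (int __delta = warpSize / 2; __delta > 0; __delta /= 2)
//       __v += __shfl_down_sync(__mask, __v, __delta);
//     return __v;
//   }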
inline __device__ void __syncwarp(unsigned int mask = 0xffffffff) {
  return __nvvm_bar_warp_sync(mask);
}

inline __device__ void __barrier_sync(unsigned int id) {
  __nvvm_barrier_sync(id);
}

inline __device__ void __barrier_sync_count(unsigned int id,
                                            unsigned int count) {
  __nvvm_barrier_sync_cnt(id, count);
}

inline __device__ int __all_sync(unsigned int mask, int pred) {
  return __nvvm_vote_all_sync(mask, pred);
}

inline __device__ int __any_sync(unsigned int mask, int pred) {
  return __nvvm_vote_any_sync(mask, pred);
}

inline __device__ int __uni_sync(unsigned int mask, int pred) {
  return __nvvm_vote_uni_sync(mask, pred);
}

inline __device__ unsigned int __ballot_sync(unsigned int mask, int pred) {
  return __nvvm_vote_ballot_sync(mask, pred);
}
inline __device__ unsigned int __activemask() {
#if CUDA_VERSION < 9020
  return __nvvm_vote_ballot(1);
#else
  return __nvvm_activemask();
#endif
}
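// Usage sketch (illustrative only, not part of the original header): counting
// how many currently-active lanes satisfy a predicate by combining
// __activemask() with __ballot_sync().
//
//   __device__ int count_active_votes(int __pred) {
//     unsigned __mask = __activemask();
//     unsigned __ballot = __ballot_sync(__mask, __pred);
//     return __popc(__ballot);
//   }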
inline __device__ unsigned int __fns(unsigned mask, unsigned base, int offset) {
  return __nvvm_fns(mask, base, offset);
}

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
// Define __match* builtins CUDA-9 headers expect to see.
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
inline __device__ unsigned int __match32_any_sync(unsigned int mask,
                                                  unsigned int value) {
  return __nvvm_match_any_sync_i32(mask, value);
}

inline __device__ unsigned long long
__match64_any_sync(unsigned int mask, unsigned long long value) {
  return __nvvm_match_any_sync_i64(mask, value);
}

inline __device__ unsigned int
__match32_all_sync(unsigned int mask, unsigned int value, int *pred) {
  return __nvvm_match_all_sync_i32p(mask, value, pred);
}

inline __device__ unsigned long long
__match64_all_sync(unsigned int mask, unsigned long long value, int *pred) {
  return __nvvm_match_all_sync_i64p(mask, value, pred);
}
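// Usage sketch (illustrative only, sm_70+, not part of the original header):
// grouping converged lanes by key. Each lane learns which other active lanes
// hold the same 32-bit key, and the lowest such lane can act as group leader.
//
//   __device__ unsigned group_leader_for_key(unsigned __key) {
//     unsigned __peers = __match32_any_sync(__activemask(), __key);
//     return __ffs(__peers) - 1;  // lowest-numbered lane with this key
//   }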
#include "crt/sm_70_rt.hpp"

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
#endif // CUDA_VERSION >= 9000

#define __SM_32_INTRINSICS_H__
#define __SM_32_INTRINSICS_HPP__

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
inline __device__ char __ldg(const char *ptr) { return __nvvm_ldg_c(ptr); }
inline __device__ short __ldg(const short *ptr) { return __nvvm_ldg_s(ptr); }
inline __device__ int __ldg(const int *ptr) { return __nvvm_ldg_i(ptr); }
inline __device__ long __ldg(const long *ptr) { return __nvvm_ldg_l(ptr); }
inline __device__ long long __ldg(const long long *ptr) {
  return __nvvm_ldg_ll(ptr);
}
inline __device__ unsigned char __ldg(const unsigned char *ptr) {
  return __nvvm_ldg_uc(ptr);
}
inline __device__ signed char __ldg(const signed char *ptr) {
  return __nvvm_ldg_uc((const unsigned char *)ptr);
}
inline __device__ unsigned short __ldg(const unsigned short *ptr) {
  return __nvvm_ldg_us(ptr);
}
inline __device__ unsigned int __ldg(const unsigned int *ptr) {
  return __nvvm_ldg_ui(ptr);
}
inline __device__ unsigned long __ldg(const unsigned long *ptr) {
  return __nvvm_ldg_ul(ptr);
}
inline __device__ unsigned long long __ldg(const unsigned long long *ptr) {
  return __nvvm_ldg_ull(ptr);
}
inline __device__ float __ldg(const float *ptr) { return __nvvm_ldg_f(ptr); }
inline __device__ double __ldg(const double *ptr) { return __nvvm_ldg_d(ptr); }

inline __device__ char2 __ldg(const char2 *ptr) {
  typedef char c2 __attribute__((ext_vector_type(2)));
  c2 rv = __nvvm_ldg_c2(reinterpret_cast<const c2 *>(ptr));
  char2 ret;
  ret.x = rv[0]; ret.y = rv[1];
  return ret;
}
inline __device__ char4 __ldg(const char4 *ptr) {
  typedef char c4 __attribute__((ext_vector_type(4)));
  c4 rv = __nvvm_ldg_c4(reinterpret_cast<const c4 *>(ptr));
  char4 ret;
  ret.x = rv[0]; ret.y = rv[1]; ret.z = rv[2]; ret.w = rv[3];
  return ret;
}
inline __device__ short2 __ldg(const short2 *ptr) {
  typedef short s2 __attribute__((ext_vector_type(2)));
  s2 rv = __nvvm_ldg_s2(reinterpret_cast<const s2 *>(ptr));
  short2 ret;
  ret.x = rv[0]; ret.y = rv[1];
  return ret;
}
inline __device__ short4 __ldg(const short4 *ptr) {
  typedef short s4 __attribute__((ext_vector_type(4)));
  s4 rv = __nvvm_ldg_s4(reinterpret_cast<const s4 *>(ptr));
  short4 ret;
  ret.x = rv[0]; ret.y = rv[1]; ret.z = rv[2]; ret.w = rv[3];
  return ret;
}
inline __device__ int2 __ldg(const int2 *ptr) {
  typedef int i2 __attribute__((ext_vector_type(2)));
  i2 rv = __nvvm_ldg_i2(reinterpret_cast<const i2 *>(ptr));
  int2 ret;
  ret.x = rv[0]; ret.y = rv[1];
  return ret;
}
inline __device__ int4 __ldg(const int4 *ptr) {
  typedef int i4 __attribute__((ext_vector_type(4)));
  i4 rv = __nvvm_ldg_i4(reinterpret_cast<const i4 *>(ptr));
  int4 ret;
  ret.x = rv[0]; ret.y = rv[1]; ret.z = rv[2]; ret.w = rv[3];
  return ret;
}
inline __device__ longlong2 __ldg(const longlong2 *ptr) {
  typedef long long ll2 __attribute__((ext_vector_type(2)));
  ll2 rv = __nvvm_ldg_ll2(reinterpret_cast<const ll2 *>(ptr));
  longlong2 ret;
  ret.x = rv[0]; ret.y = rv[1];
  return ret;
}
inline __device__ uchar2 __ldg(const uchar2 *ptr) {
  typedef unsigned char uc2 __attribute__((ext_vector_type(2)));
  uc2 rv = __nvvm_ldg_uc2(reinterpret_cast<const uc2 *>(ptr));
  uchar2 ret;
  ret.x = rv[0]; ret.y = rv[1];
  return ret;
}
inline __device__ uchar4 __ldg(const uchar4 *ptr) {
  typedef unsigned char uc4 __attribute__((ext_vector_type(4)));
  uc4 rv = __nvvm_ldg_uc4(reinterpret_cast<const uc4 *>(ptr));
  uchar4 ret;
  ret.x = rv[0]; ret.y = rv[1]; ret.z = rv[2]; ret.w = rv[3];
  return ret;
}
inline __device__ ushort2 __ldg(const ushort2 *ptr) {
  typedef unsigned short us2 __attribute__((ext_vector_type(2)));
  us2 rv = __nvvm_ldg_us2(reinterpret_cast<const us2 *>(ptr));
  ushort2 ret;
  ret.x = rv[0]; ret.y = rv[1];
  return ret;
}
inline __device__ ushort4 __ldg(const ushort4 *ptr) {
  typedef unsigned short us4 __attribute__((ext_vector_type(4)));
  us4 rv = __nvvm_ldg_us4(reinterpret_cast<const us4 *>(ptr));
  ushort4 ret;
  ret.x = rv[0]; ret.y = rv[1]; ret.z = rv[2]; ret.w = rv[3];
  return ret;
}
inline __device__ uint2 __ldg(const uint2 *ptr) {
  typedef unsigned int ui2 __attribute__((ext_vector_type(2)));
  ui2 rv = __nvvm_ldg_ui2(reinterpret_cast<const ui2 *>(ptr));
  uint2 ret;
  ret.x = rv[0]; ret.y = rv[1];
  return ret;
}
inline __device__ uint4 __ldg(const uint4 *ptr) {
  typedef unsigned int ui4 __attribute__((ext_vector_type(4)));
  ui4 rv = __nvvm_ldg_ui4(reinterpret_cast<const ui4 *>(ptr));
  uint4 ret;
  ret.x = rv[0]; ret.y = rv[1]; ret.z = rv[2]; ret.w = rv[3];
  return ret;
}
inline __device__ ulonglong2 __ldg(const ulonglong2 *ptr) {
  typedef unsigned long long ull2 __attribute__((ext_vector_type(2)));
  ull2 rv = __nvvm_ldg_ull2(reinterpret_cast<const ull2 *>(ptr));
  ulonglong2 ret;
  ret.x = rv[0]; ret.y = rv[1];
  return ret;
}
inline __device__ float2 __ldg(const float2 *ptr) {
  typedef float f2 __attribute__((ext_vector_type(2)));
  f2 rv = __nvvm_ldg_f2(reinterpret_cast<const f2 *>(ptr));
  float2 ret;
  ret.x = rv[0]; ret.y = rv[1];
  return ret;
}
inline __device__ float4 __ldg(const float4 *ptr) {
  typedef float f4 __attribute__((ext_vector_type(4)));
  f4 rv = __nvvm_ldg_f4(reinterpret_cast<const f4 *>(ptr));
  float4 ret;
  ret.x = rv[0]; ret.y = rv[1]; ret.z = rv[2]; ret.w = rv[3];
  return ret;
}
inline __device__ double2 __ldg(const double2 *ptr) {
  typedef double d2 __attribute__((ext_vector_type(2)));
  d2 rv = __nvvm_ldg_d2(reinterpret_cast<const d2 *>(ptr));
  double2 ret;
  ret.x = rv[0]; ret.y = rv[1];
  return ret;
}
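// Usage sketch (illustrative only, not part of the original header): __ldg
// loads through the read-only data cache. It is only safe when no thread
// writes the memory for the lifetime of the kernel, e.g. a lookup table
// passed in as const __restrict__.
//
//   __global__ void scale(float *__out, const float *__restrict__ __lut,
//                         const int *__restrict__ __idx, int __n) {
//     int __i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (__i < __n)
//       __out[__i] = __ldg(&__lut[__ldg(&__idx[__i])]);
//   }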
inline __device__ unsigned __funnelshift_l(unsigned low32, unsigned high32,
                                           unsigned shiftWidth) {
  unsigned result;
  asm("shf.l.wrap.b32 %0, %1, %2, %3;"
      : "=r"(result)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return result;
}

inline __device__ unsigned __funnelshift_lc(unsigned low32, unsigned high32,
                                            unsigned shiftWidth) {
  unsigned result;
  asm("shf.l.clamp.b32 %0, %1, %2, %3;"
      : "=r"(result)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return result;
}

inline __device__ unsigned __funnelshift_r(unsigned low32, unsigned high32,
                                           unsigned shiftWidth) {
  unsigned result;
  asm("shf.r.wrap.b32 %0, %1, %2, %3;"
      : "=r"(result)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return result;
}

inline __device__ unsigned __funnelshift_rc(unsigned low32, unsigned high32,
                                            unsigned shiftWidth) {
  unsigned result;
  asm("shf.r.clamp.b32 %0, %1, %2, %3;"
      : "=r"(result)
      : "r"(low32), "r"(high32), "r"(shiftWidth));
  return result;
}

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
#if CUDA_VERSION >= 11000
__device__ inline size_t __nv_cvta_generic_to_global_impl(const void *__ptr) {
  return (size_t)(void __attribute__((address_space(1))) *)__ptr;
}
__device__ inline size_t __nv_cvta_generic_to_shared_impl(const void *__ptr) {
  return (size_t)(void __attribute__((address_space(3))) *)__ptr;
}
__device__ inline size_t __nv_cvta_generic_to_constant_impl(const void *__ptr) {
  return (size_t)(void __attribute__((address_space(4))) *)__ptr;
}
__device__ inline size_t __nv_cvta_generic_to_local_impl(const void *__ptr) {
  return (size_t)(void __attribute__((address_space(5))) *)__ptr;
}
__device__ inline void *__nv_cvta_global_to_generic_impl(size_t __ptr) {
  return (void *)(void __attribute__((address_space(1))) *)__ptr;
}
__device__ inline void *__nv_cvta_shared_to_generic_impl(size_t __ptr) {
  return (void *)(void __attribute__((address_space(3))) *)__ptr;
}
__device__ inline void *__nv_cvta_constant_to_generic_impl(size_t __ptr) {
  return (void *)(void __attribute__((address_space(4))) *)__ptr;
}
__device__ inline void *__nv_cvta_local_to_generic_impl(size_t __ptr) {
  return (void *)(void __attribute__((address_space(5))) *)__ptr;
}
__device__ inline cuuint32_t __nvvm_get_smem_pointer(void *__ptr) {
  return __nv_cvta_generic_to_shared_impl(__ptr);
}
#endif // CUDA_VERSION >= 11000
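// Usage sketch (illustrative only, not part of the original header):
// obtaining the 32-bit shared-state-space address of a __shared__ buffer,
// e.g. to feed it as an operand to inline PTX.
//
//   __shared__ float tile[128];
//   unsigned smem_addr = __nvvm_get_smem_pointer(&tile[0]);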
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
__device__ inline unsigned __reduce_add_sync(unsigned __mask,
                                             unsigned __value) {
  return __nvvm_redux_sync_add(__mask, __value);
}
__device__ inline unsigned __reduce_min_sync(unsigned __mask,
                                             unsigned __value) {
  return __nvvm_redux_sync_umin(__mask, __value);
}
__device__ inline unsigned __reduce_max_sync(unsigned __mask,
                                             unsigned __value) {
  return __nvvm_redux_sync_umax(__mask, __value);
}
__device__ inline int __reduce_min_sync(unsigned __mask, int __value) {
  return __nvvm_redux_sync_min(__mask, __value);
}
__device__ inline int __reduce_max_sync(unsigned __mask, int __value) {
  return __nvvm_redux_sync_max(__mask, __value);
}
__device__ inline unsigned __reduce_or_sync(unsigned __mask,
                                            unsigned __value) {
  return __nvvm_redux_sync_or(__mask, __value);
}
__device__ inline unsigned __reduce_and_sync(unsigned __mask,
                                             unsigned __value) {
  return __nvvm_redux_sync_and(__mask, __value);
}
__device__ inline unsigned __reduce_xor_sync(unsigned __mask,
                                             unsigned __value) {
  return __nvvm_redux_sync_xor(__mask, __value);
}
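// Usage sketch (illustrative only, sm_80+, not part of the original header):
// a single-call warp reduction, replacing the shuffle loop shown earlier for
// unsigned operands.
//
//   __device__ unsigned warp_sum_sm80(unsigned __mask, unsigned __v) {
//     return __reduce_add_sync(__mask, __v);
//   }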
__device__ inline void __nv_memcpy_async_shared_global_4(void *__dst,
                                                         const void *__src,
                                                         unsigned __src_size) {
  __nvvm_cp_async_ca_shared_global_4(
      (void __attribute__((address_space(3))) *)__dst,
      (const void __attribute__((address_space(1))) *)__src, __src_size);
}
__device__ inline void __nv_memcpy_async_shared_global_8(void *__dst,
                                                         const void *__src,
                                                         unsigned __src_size) {
  __nvvm_cp_async_ca_shared_global_8(
      (void __attribute__((address_space(3))) *)__dst,
      (const void __attribute__((address_space(1))) *)__src, __src_size);
}
__device__ inline void __nv_memcpy_async_shared_global_16(void *__dst,
                                                          const void *__src,
                                                          unsigned __src_size) {
  __nvvm_cp_async_ca_shared_global_16(
      (void __attribute__((address_space(3))) *)__dst,
      (const void __attribute__((address_space(1))) *)__src, __src_size);
}

__device__ inline void *
__nv_associate_access_property(const void *__ptr, unsigned long long __prop) {
  // The access property is only a hint; passing the pointer through unchanged
  // is correct, which is what CUDA's own headers do on pre-sm_80 targets.
  return (void *)__ptr;
}
#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
__device__ inline unsigned __isCtaShared(const void *ptr) {
  return __isShared(ptr);
}

__device__ inline unsigned __isClusterShared(const void *__ptr) {
  return __nvvm_isspacep_shared_cluster(__ptr);
}

__device__ inline void *__cluster_map_shared_rank(const void *__ptr,
                                                  unsigned __rank) {
  return __nvvm_mapa((void *)__ptr, __rank);
}

__device__ inline unsigned __cluster_query_shared_rank(const void *__ptr) {
  return __nvvm_getctarank((void *)__ptr);
}

__device__ inline uint2
__cluster_map_shared_multicast(const void *__ptr,
                               unsigned int __cluster_cta_mask) {
  return make_uint2((unsigned)__cvta_generic_to_shared(__ptr),
                    __cluster_cta_mask);
}

__device__ inline unsigned __clusterDimIsSpecified() {
  return __nvvm_is_explicit_cluster();
}

__device__ inline dim3 __clusterDim() {
  return dim3(__nvvm_read_ptx_sreg_cluster_nctaid_x(),
              __nvvm_read_ptx_sreg_cluster_nctaid_y(),
              __nvvm_read_ptx_sreg_cluster_nctaid_z());
}

__device__ inline dim3 __clusterRelativeBlockIdx() {
  return dim3(__nvvm_read_ptx_sreg_cluster_ctaid_x(),
              __nvvm_read_ptx_sreg_cluster_ctaid_y(),
              __nvvm_read_ptx_sreg_cluster_ctaid_z());
}

__device__ inline dim3 __clusterGridDimInClusters() {
  return dim3(__nvvm_read_ptx_sreg_nclusterid_x(),
              __nvvm_read_ptx_sreg_nclusterid_y(),
              __nvvm_read_ptx_sreg_nclusterid_z());
}

__device__ inline dim3 __clusterIdx() {
  return dim3(__nvvm_read_ptx_sreg_clusterid_x(),
              __nvvm_read_ptx_sreg_clusterid_y(),
              __nvvm_read_ptx_sreg_clusterid_z());
}

__device__ inline unsigned __clusterRelativeBlockRank() {
  return __nvvm_read_ptx_sreg_cluster_ctarank();
}

__device__ inline unsigned __clusterSizeInBlocks() {
  return __nvvm_read_ptx_sreg_cluster_nctarank();
}

__device__ inline void __cluster_barrier_arrive() {
  __nvvm_barrier_cluster_arrive();
}

__device__ inline void __cluster_barrier_arrive_relaxed() {
  __nvvm_barrier_cluster_arrive_relaxed();
}

__device__ inline void __cluster_barrier_wait() {
  __nvvm_barrier_cluster_wait();
}

__device__ inline void __threadfence_cluster() { __nvvm_fence_sc_cluster(); }
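// Usage sketch (illustrative only, sm_90+, not part of the original header):
// a split arrive/wait cluster barrier around distributed-shared-memory
// communication between the blocks of a thread-block cluster.
//
//   __shared__ int stage[256];
//   // ... write this block's portion of stage[] ...
//   __cluster_barrier_arrive();   // signal: our shared data is ready
//   // ... independent work that does not touch remote shared memory ...
//   __cluster_barrier_wait();     // all blocks in the cluster have arrived
//   int *remote = (int *)__cluster_map_shared_rank(stage, 0 /* block rank */);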
__device__ inline float2 atomicAdd(float2 *__ptr, float2 __val) {
  float2 __ret;
  __asm__("atom.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
          : "=f"(__ret.x), "=f"(__ret.y)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
  return __ret;
}

__device__ inline float2 atomicAdd_block(float2 *__ptr, float2 __val) {
  float2 __ret;
  __asm__("atom.cta.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
          : "=f"(__ret.x), "=f"(__ret.y)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
  return __ret;
}

__device__ inline float2 atomicAdd_system(float2 *__ptr, float2 __val) {
  float2 __ret;
  __asm__("atom.sys.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
          : "=f"(__ret.x), "=f"(__ret.y)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
  return __ret;
}
__device__ inline float4 atomicAdd(float4 *__ptr, float4 __val) {
  float4 __ret;
  __asm__("atom.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
          : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z),
            "f"(__val.w));
  return __ret;
}

__device__ inline float4 atomicAdd_block(float4 *__ptr, float4 __val) {
  float4 __ret;
  __asm__("atom.cta.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
          : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z),
            "f"(__val.w));
  return __ret;
}

__device__ inline float4 atomicAdd_system(float4 *__ptr, float4 __val) {
  float4 __ret;
  __asm__("atom.sys.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
          : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
          : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z),
            "f"(__val.w));
  return __ret;
}

#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
#endif // defined(__CLANG_CUDA_INTRINSICS_H__)