#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__
#define __CLANG_CUDA_RUNTIME_WRAPPER_H__

#if defined(__CUDA__) && defined(__clang__)
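// Included implicitly by clang when compiling CUDA sources: it pulls in
// NVIDIA's CUDA headers and applies the preprocessor fixups below so they can
// be parsed for both host and device compilation.

// Preserve macros that we (or the CUDA headers) are about to change.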
#pragma push_macro("__THROW")
#pragma push_macro("__CUDA_ARCH__")

#if !defined(CUDA_VERSION)
#error "cuda.h did not define CUDA_VERSION"
#elif CUDA_VERSION < 7000
#error "Unsupported CUDA version!"
#endif
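// CUDA-10 and newer appear to guard some compiler-internal declarations
// behind this macro; defining it keeps them visible to us.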
#pragma push_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
#if CUDA_VERSION >= 10000
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
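// Pretend to compile for a very high compute capability so that the largest
// possible subset of device functions is visible during host compilation.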
#define __CUDA_ARCH__ 9999
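// Pre-define the include guards of several CUDA headers whose contents are
// replaced by clang's own wrappers, so that including them becomes a no-op.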
#define __DEVICE_LAUNCH_PARAMETERS_H__

#define __DEVICE_FUNCTIONS_H__
#define __MATH_FUNCTIONS_H__
#define __COMMON_FUNCTIONS_H__
#define __DEVICE_FUNCTIONS_DECLS_H__
#if CUDA_VERSION < 9000
#define __CUDA_LIBDEVICE__
#endif
#include "host_defines.h"
#include "driver_types.h"
#include "host_config.h"

#pragma push_macro("nv_weak")
#undef __CUDA_LIBDEVICE__
#include "cuda_runtime.h"
#pragma pop_macro("nv_weak")
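// The CUDA headers call __nvvm_memcpy/__nvvm_memset, which clang does not
// provide as builtins; map them to the standard memory builtins instead.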
#define __nvvm_memcpy(s, d, n, a) __builtin_memcpy(s, d, n)
#define __nvvm_memset(d, c, n, a) __builtin_memset(d, c, n)
#if CUDA_VERSION < 9000
#include "crt/device_runtime.h"
#endif
#include "crt/host_runtime.h"
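// The CUDA runtime headers define the __cxa_vec_* helpers as macros, which
// conflict with the compiler's own C++ ABI support; undefine them here.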
#undef __cxa_vec_cctor
#undef __cxa_vec_delete2
#undef __cxa_vec_delete
#undef __cxa_vec_delete3
#undef __cxa_pure_virtual
#if CUDA_VERSION >= 9000
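// The math headers consult __USE_FAST_MATH__ and __CUDA_PREC_DIV; default
// both to the precise (non-fast) variants when nothing else has set them.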
#if defined(CU_DEVICE_INVALID)
#if !defined(__USE_FAST_MATH__)
#define __USE_FAST_MATH__ 0
#endif

#if !defined(__CUDA_PREC_DIV)
#define __CUDA_PREC_DIV 0
#endif
#endif
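// Temporarily poison __host__: none of the headers included below should be
// using it, and this makes any accidental use an error.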
#pragma push_macro("__host__")
#define __host__ UNEXPECTED_HOST_ATTRIBUTE
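// The CUDA .hpp files define device functions as plain 'static
// __forceinline__' without __device__; temporarily fold __device__ into
// __forceinline__ so those definitions become device functions.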
#pragma push_macro("__forceinline__")
#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
#if CUDA_VERSION < 9000
#include "device_functions.hpp"
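// __USE_FAST_MATH__ selects between the accurate and the fast-but-approximate
// math implementations; enable the fast path only when clang was asked for
// approximate GPU transcendentals (e.g. via fast-math).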
#pragma push_macro("__USE_FAST_MATH__")
#if defined(__CLANG_GPU_APPROX_TRANSCENDENTALS__)
#define __USE_FAST_MATH__ 1
#endif

#if CUDA_VERSION >= 9000
#include "crt/math_functions.hpp"
#else
#include "math_functions.hpp"
#endif
#pragma pop_macro("__USE_FAST_MATH__")
#if CUDA_VERSION < 9000
#include "math_functions_dbl_ptx3.hpp"
#endif
#pragma pop_macro("__forceinline__")
#undef __MATH_FUNCTIONS_HPP__
#if CUDA_VERSION < 9000
#include "math_functions.hpp"
#if CUDA_VERSION < 9000
#if CUDA_VERSION >= 9000
#include "device_atomic_functions.h"
#endif
#undef __DEVICE_FUNCTIONS_HPP__
#include "device_atomic_functions.hpp"
#if CUDA_VERSION >= 9000
#include "crt/device_functions.hpp"
#include "crt/device_double_functions.hpp"
#else
#include "device_functions.hpp"
#include "device_double_functions.h"
#endif
#include "sm_20_atomic_functions.hpp"
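// sm_20_intrinsics.hpp declares __isGlobal and friends in a way that clashes
// with the implementations provided below, so hide those names behind dummy
// identifiers while that header is parsed.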
#pragma push_macro("__isGlobal")
#pragma push_macro("__isShared")
#pragma push_macro("__isConstant")
#pragma push_macro("__isLocal")
#define __isGlobal __ignored_cuda___isGlobal
#define __isShared __ignored_cuda___isShared
#define __isConstant __ignored_cuda___isConstant
#define __isLocal __ignored_cuda___isLocal
#include "sm_20_intrinsics.hpp"
#pragma pop_macro("__isGlobal")
#pragma pop_macro("__isShared")
#pragma pop_macro("__isConstant")
#pragma pop_macro("__isLocal")
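// Re-implement the address-space predicates on top of clang's
// __nvvm_isspacep_* builtins. Illustrative (hypothetical) device-side use:
//   if (__isShared(ptr)) { /* pointer refers to __shared__ memory */ }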
#pragma push_macro("__DEVICE__")
#define __DEVICE__ static __device__ __forceinline__ __attribute__((const))
__DEVICE__ unsigned int __isGlobal(const void *p) {
  return __nvvm_isspacep_global(p);
}
__DEVICE__ unsigned int __isShared(const void *p) {
  return __nvvm_isspacep_shared(p);
}
__DEVICE__ unsigned int __isConstant(const void *p) {
  return __nvvm_isspacep_const(p);
}
__DEVICE__ unsigned int __isLocal(const void *p) {
  return __nvvm_isspacep_local(p);
}
#pragma pop_macro("__DEVICE__")
#include "sm_32_atomic_functions.hpp"

#if CUDA_VERSION >= 8000
#pragma push_macro("__CUDA_ARCH__")
#include "sm_60_atomic_functions.hpp"
#include "sm_61_intrinsics.hpp"
#pragma pop_macro("__CUDA_ARCH__")
#undef __MATH_FUNCTIONS_HPP__
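// Hide the host's signbit macro and __GNUC__-conditional code paths while the
// device-side math implementations below are parsed.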
#pragma push_macro("signbit")
#pragma push_macro("__GNUC__")
#define signbit __ignored_cuda_signbit
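// CUDA-9 skips some device-side math definitions when it sees the include
// guards of libstdc++'s <math.h> wrapper or a recent libc++; temporarily undo
// those guards so we still get the definitions we need.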
#pragma push_macro("_GLIBCXX_MATH_H")
#pragma push_macro("_LIBCPP_VERSION")
#if CUDA_VERSION >= 9000
#undef _GLIBCXX_MATH_H
#ifdef _LIBCPP_VERSION
#define _LIBCPP_VERSION 3700
#endif
#endif

#if CUDA_VERSION >= 9000
#include "crt/math_functions.hpp"
#else
#include "math_functions.hpp"
#endif
#pragma pop_macro("_GLIBCXX_MATH_H")
#pragma pop_macro("_LIBCPP_VERSION")
#pragma pop_macro("__GNUC__")
#pragma pop_macro("signbit")
#pragma pop_macro("__host__")
#if __cplusplus >= 201103L && CUDA_VERSION >= 9000

#if CUDA_VERSION >= 9000
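// CUDA-9's texture support relies on C++11. In pre-C++11 modes, provide a
// trap so that any use of the texture handler fails with a clear diagnostic.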
template <typename T> struct __nv_tex_needs_cxx11 {
  const static bool value = false;
};
    cudaTextureObject_t obj,
  _Static_assert(__nv_tex_needs_cxx11<T>::value,
                 "Texture support requires C++11");
#include "surface_indirect_functions.h"
#if CUDA_VERSION < 13000
#include "texture_fetch_functions.h"
#endif
#include "texture_indirect_functions.h"
#pragma pop_macro("__CUDA_ARCH__")
#pragma pop_macro("__THROW")
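// Device-side CUDA "system calls": declarations needed so device-side printf,
// malloc, free and assert work. Clang lowers device-side printf to vprintf.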
__device__ int vprintf(const char *, const char *);
__device__ void free(void *) __attribute((nothrow));
__device__ void __assertfail(const char *__message, const char *__file,
                             unsigned __line, const char *__function,
                             size_t __charSize) __attribute__((noreturn));
__device__ static inline void __assert_fail(const char *__message,
                                            const char *__file,
                                            unsigned __line,
                                            const char *__function) {
  __assertfail(__message, __file, __line, __function, sizeof(char));
}
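// Device-side malloc/free wrappers; in the full header these live in
// namespace std so that std::malloc/std::free resolve on the device.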
__device__ static inline void free(void *__ptr) { ::free(__ptr); }
__device__ static inline void *malloc(size_t __size) {
  return ::malloc(__size);
}
__device__ inline __cuda_builtin_threadIdx_t::operator dim3() const {
  return dim3(x, y, z);
}
__device__ inline __cuda_builtin_blockIdx_t::operator dim3() const {
  return dim3(x, y, z);
}
__device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
  return dim3(x, y, z);
}
__device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
  return dim3(x, y, z);
}
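// curand_mtgp32_kernel.h appears to assume blockDim and threadIdx have CUDA's
// dim3/uint3 types; since clang's builtin variables use their own types,
// temporarily alias dim3/uint3 to those types while that header is parsed.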
#pragma push_macro("dim3")
#pragma push_macro("uint3")
#define dim3 __cuda_builtin_blockDim_t
#define uint3 __cuda_builtin_threadIdx_t
#include "curand_mtgp32_kernel.h"
#pragma pop_macro("dim3")
#pragma pop_macro("uint3")
#pragma pop_macro("__USE_FAST_MATH__")
#pragma pop_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
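// The CUDA runtime uses this undocumented function to access the kernel
// launch configuration; its declaration normally lives in a CUDA header we
// avoid including, so one is provided here.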
#if CUDA_VERSION >= 9020
extern "C" unsigned __cudaPushCallConfiguration(dim3 gridDim, dim3 blockDim,
                                                size_t sharedMem = 0,
                                                void *stream = 0);
#endif

#endif // __CUDA__ && __clang__

#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__