// Include guard for clang's CUDA runtime wrapper header, plus a gate so the
// contents are only processed when clang itself is compiling CUDA code.
// NOTE(review): every directive below carries a stray numeric prefix (e.g.
// "26", "27") that looks like an original line number fused in by a broken
// extraction; the file cannot preprocess until those prefixes are removed.
// The matching #endif lines for these #if/#ifndef blocks are not visible in
// this chunk — presumably lost by the same extraction.
26#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__
27#define __CLANG_CUDA_RUNTIME_WRAPPER_H__
29#if defined(__CUDA__) && defined(__clang__)
// Choose a declaration specifier used later for rsqrt-style declarations:
// glibc at or beyond the version tested by __GLIBC_PREREQ gets
// noexcept(true); every other configuration gets an empty specifier.
// NOTE(review): the #endif lines closing these conditionals are missing
// from this view (extraction damage), as are any lines between the two
// blocks.
50#if defined(__GNUC__) && defined(__GLIBC_PREREQ)
51#if __GLIBC_PREREQ(2, 42)
52#define _NV_RSQRT_SPECIFIER noexcept(true)
56#ifndef _NV_RSQRT_SPECIFIER
57#define _NV_RSQRT_SPECIFIER
// Save macros this wrapper temporarily redefines; they are restored via
// pop_macro much later in the file (see the pops around L145-L146).
62#pragma push_macro("__THROW")
63#pragma push_macro("__CUDA_ARCH__")
// Validate the CUDA installation: cuda.h (presumably included before this
// point, outside this view) must have defined CUDA_VERSION, and versions
// below 7.0 are rejected outright.
// NOTE(review): the closing #endif for this #if/#elif chain is not visible
// in this chunk.
69#if !defined(CUDA_VERSION)
70#error "cuda.h did not define CUDA_VERSION"
71#elif CUDA_VERSION < 7000
72#error "Unsupported CUDA version!"
75#pragma push_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
// CUDA 10+ expects this macro to be defined before its headers are included.
76#if CUDA_VERSION >= 10000
77#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
// Claim an implausibly high compute capability (9999) while NVIDIA's
// headers are parsed, so that every device-side declaration they gate on
// __CUDA_ARCH__ becomes visible.
83#define __CUDA_ARCH__ 9999
// Pre-define the include guards of several NVIDIA headers; presumably this
// suppresses their contents so that replacements provided elsewhere win.
// TODO(review): confirm each guard name matches the header it suppresses.
90#define __DEVICE_LAUNCH_PARAMETERS_H__
95#define __DEVICE_FUNCTIONS_H__
96#define __MATH_FUNCTIONS_H__
97#define __COMMON_FUNCTIONS_H__
100#define __DEVICE_FUNCTIONS_DECLS_H__
// For CUDA < 9, define __CUDA_LIBDEVICE__ while pulling in the low-level
// driver/host headers, then undef it again before cuda_runtime.h proper.
// NOTE(review): the #endif closing this version check is not visible here.
103#if CUDA_VERSION < 9000
107#define __CUDA_LIBDEVICE__
111#include "host_defines.h"
113#include "driver_types.h"
114#include "host_config.h"
// nv_weak is saved across the cuda_runtime.h include and restored after —
// presumably that header clobbers it; verify against the CUDA headers.
119#pragma push_macro("nv_weak")
122#undef __CUDA_LIBDEVICE__
124#include "cuda_runtime.h"
126#pragma pop_macro("nv_weak")
// Map CUDA's __nvvm_memcpy/__nvvm_memset — which take a trailing alignment
// argument `a` — onto the clang builtins, discarding the alignment.
132#define __nvvm_memcpy(s, d, n, a) __builtin_memcpy(s, d, n)
133#define __nvvm_memset(d, c, n, a) __builtin_memset(d, c, n)
// Pre-CUDA-9 layout keeps device_runtime.h under crt/; host_runtime.h is
// included as well (the #else/#endif structure here is missing from view).
135#if CUDA_VERSION < 9000
136#include "crt/device_runtime.h"
138#include "crt/host_runtime.h"
// Remove these __cxa_* names after the includes above.
// NOTE(review): presumably they are macros defined by host_runtime.h that
// would shadow the real C++ ABI symbols — confirm against the CUDA headers.
143#undef __cxa_vec_cctor
148#undef __cxa_vec_delete2
149#undef __cxa_vec_delete
150#undef __cxa_vec_delete3
151#undef __cxa_pure_virtual
// NOTE(review): the bodies of the following two conditionals were dropped
// by the extraction; only the guards remain visible.
169#if CUDA_VERSION >= 9000
185#if defined(CU_DEVICE_INVALID)
// Default the fast-math and precise-division knobs to 0 (off) when the CUDA
// headers did not define them.
186#if !defined(__USE_FAST_MATH__)
187#define __USE_FAST_MATH__ 0
190#if !defined(__CUDA_PREC_DIV)
191#define __CUDA_PREC_DIV 0
// Poison __host__ while device-side implementation headers are parsed: any
// header that tries to declare a host function will hit the undefined
// identifier UNEXPECTED_HOST_ATTRIBUTE and fail loudly.
197#pragma push_macro("__host__")
198#define __host__ UNEXPECTED_HOST_ATTRIBUTE
// Redefine __forceinline__ to mean "device + inline + always_inline" for
// the duration of these includes; restored by the pop below.
204#pragma push_macro("__forceinline__")
205#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
206#if CUDA_VERSION < 9000
207#include "device_functions.hpp"
// Honor clang's approximate-transcendentals mode by forcing the CUDA
// headers' fast-math paths on while math_functions.hpp is parsed.
216#pragma push_macro("__USE_FAST_MATH__")
217#if defined(__CLANG_GPU_APPROX_TRANSCENDENTALS__)
218#define __USE_FAST_MATH__ 1
// CUDA 9 moved math_functions.hpp under crt/ (pre-9 path is the #else arm;
// the #else/#endif lines themselves are missing from this view).
221#if CUDA_VERSION >= 9000
222#include "crt/math_functions.hpp"
224#include "math_functions.hpp"
227#pragma pop_macro("__USE_FAST_MATH__")
229#if CUDA_VERSION < 9000
230#include "math_functions_dbl_ptx3.hpp"
232#pragma pop_macro("__forceinline__")
// Clear math_functions.hpp's include guard so it can be pulled in a second
// time below (pre-CUDA-9 only, per the version check).
236#undef __MATH_FUNCTIONS_HPP__
238#if CUDA_VERSION < 9000
239#include "math_functions.hpp"
// NOTE(review): the bodies of these two version conditionals were dropped
// by the extraction; only the guards survive.
256#if CUDA_VERSION < 9000
270#if CUDA_VERSION >= 9000
// Atomic and double-precision device function headers; the guard-clearing
// #undef lets device_atomic_functions.hpp re-include cleanly.
273#include "device_atomic_functions.h"
275#undef __DEVICE_FUNCTIONS_HPP__
276#include "device_atomic_functions.hpp"
277#if CUDA_VERSION >= 9000
278#include "crt/device_functions.hpp"
279#include "crt/device_double_functions.hpp"
281#include "device_functions.hpp"
283#include "device_double_functions.h"
286#include "sm_20_atomic_functions.hpp"
// Hide sm_20_intrinsics.hpp's definitions of the address-space predicates
// (__isGlobal/__isShared/__isConstant/__isLocal) behind dummy names while
// that header is parsed, so they do not collide with the implementations
// this wrapper supplies immediately afterwards. push_macro/pop_macro
// restores whatever definitions (if any) existed before.
#pragma push_macro("__isGlobal")
#pragma push_macro("__isShared")
#pragma push_macro("__isConstant")
#pragma push_macro("__isLocal")
#define __isGlobal __ignored_cuda___isGlobal
#define __isShared __ignored_cuda___isShared
#define __isConstant __ignored_cuda___isConstant
#define __isLocal __ignored_cuda___isLocal
#include "sm_20_intrinsics.hpp"
#pragma pop_macro("__isGlobal")
#pragma pop_macro("__isShared")
#pragma pop_macro("__isConstant")
#pragma pop_macro("__isLocal")
// Wrapper-provided implementations of the address-space predicates: each
// returns nonzero iff the pointer lies in the corresponding address space,
// by delegating directly to the matching NVVM intrinsic. __DEVICE__ is a
// temporary convenience macro (static device always_inline const) and is
// restored afterwards via pop_macro.
#pragma push_macro("__DEVICE__")
#define __DEVICE__ static __device__ __forceinline__ __attribute__((const))
__DEVICE__ unsigned int __isGlobal(const void *p) {
  return __nvvm_isspacep_global(p);
}
__DEVICE__ unsigned int __isShared(const void *p) {
  return __nvvm_isspacep_shared(p);
}
__DEVICE__ unsigned int __isConstant(const void *p) {
  return __nvvm_isspacep_const(p);
}
__DEVICE__ unsigned int __isLocal(const void *p) {
  return __nvvm_isspacep_local(p);
}
#pragma pop_macro("__DEVICE__")
319#include "sm_32_atomic_functions.hpp"
// CUDA 8+ Pascal headers; __CUDA_ARCH__ is saved and restored around them
// (presumably redefined in the dropped lines between push and the includes).
329#if CUDA_VERSION >= 8000
330#pragma push_macro("__CUDA_ARCH__")
332#include "sm_60_atomic_functions.hpp"
333#include "sm_61_intrinsics.hpp"
334#pragma pop_macro("__CUDA_ARCH__")
// Clear the guard so math_functions.hpp can be parsed once more below,
// this time with host-library macros hidden.
337#undef __MATH_FUNCTIONS_HPP__
// Hide host-side signbit (and save __GNUC__) while re-parsing the math
// header, so its declarations do not collide with host libm macros.
343#pragma push_macro("signbit")
344#pragma push_macro("__GNUC__")
346#define signbit __ignored_cuda_signbit
// Defeat libstdc++/libc++ detection inside the CUDA math header: clear
// _GLIBCXX_MATH_H and pin _LIBCPP_VERSION to an old value (3700).
// TODO(review): confirm why 3700 specifically — the rationale comment was
// presumably on one of the dropped lines.
351#pragma push_macro("_GLIBCXX_MATH_H")
352#pragma push_macro("_LIBCPP_VERSION")
353#if CUDA_VERSION >= 9000
354#undef _GLIBCXX_MATH_H
356#ifdef _LIBCPP_VERSION
357#define _LIBCPP_VERSION 3700
361#if CUDA_VERSION >= 9000
362#include "crt/math_functions.hpp"
364#include "math_functions.hpp"
// Restore everything hidden for the re-include above.
366#pragma pop_macro("_GLIBCXX_MATH_H")
367#pragma pop_macro("_LIBCPP_VERSION")
368#pragma pop_macro("__GNUC__")
369#pragma pop_macro("signbit")
// Restore __host__, ending the poisoned-region begun at the earlier push.
371#pragma pop_macro("__host__")
// Texture support is gated on C++11 and CUDA 9+.
376#if __cplusplus >= 201103L && CUDA_VERSION >= 9000
381#if CUDA_VERSION >= 9000
// NOTE(review): this template and the texture-fetch wrapper that uses it
// are badly truncated by the extraction (line breaks mid-token, the
// surrounding function signature at "388" is missing its head and tail).
// What remains shows the intent: a type trait that is always false, used in
// a static assertion so that any pre-C++11 instantiation of the texture
// path fails with "Texture support requires C++11".
383template <
typename T>
struct __nv_tex_needs_cxx11 {
384 const static bool value =
false;
388 cudaTextureObject_t obj,
390 _Static_assert(__nv_tex_needs_cxx11<T>::value,
391 "Texture support requires C++11");
400#include "surface_indirect_functions.h"
// texture_fetch/texture_indirect headers exist only before CUDA 13.
401#if CUDA_VERSION < 13000
405#include "texture_fetch_functions.h"
407#include "texture_indirect_functions.h"
// Restore the two macros saved at the very top of the wrapper.
410#pragma pop_macro("__CUDA_ARCH__")
411#pragma pop_macro("__THROW")
// Device-side declarations of runtime entry points, plus inline wrappers.
// NOTE(review): this region was reconstructed from a garbled extraction —
// fused line-number prefixes removed, dropped closing braces restored, and
// two dropped lines rebuilt from evidence in the visible code (see notes).
// Confirm against the pristine wrapper header.
__device__ int vprintf(const char *, const char *);
__device__ void free(void *) __attribute((nothrow));
// Reconstructed: a global malloc declaration must exist for the
// ::malloc call in the wrapper below to resolve — TODO confirm attributes.
__device__ void *malloc(size_t) __attribute((nothrow));
// Fifth parameter reconstructed from the visible call site, which passes
// sizeof(char) (a size_t) as the fifth argument.
__device__ void __assertfail(const char *__message, const char *__file,
                             unsigned __line, const char *__function,
                             size_t __charSize);
// Device-side assert handler: forwards to __assertfail with the character
// size of the message text.
__device__ static inline void __assert_fail(const char *__message,
                                            const char *__file,
                                            unsigned __line,
                                            const char *__function) {
  __assertfail(__message, __file, __line, __function, sizeof(char));
}
// Namespace-scoped wrappers that forward to the global allocator entry
// points. The explicit ::free/::malloc qualification in the original shows
// these definitions live inside a namespace — presumably std, so device
// code can call std::malloc/std::free; verify against the pristine header.
namespace std {
__device__ static inline void free(void *__ptr) { ::free(__ptr); }
__device__ static inline void *malloc(size_t __size) {
  return ::malloc(__size);
}
} // namespace std
// Out-of-line definitions of the dim3 conversion operators for the builtin
// variable types (threadIdx/blockIdx/blockDim/gridDim): each packs its
// x/y/z components into a dim3. The types are presumably declared in
// clang's builtin-variables header — confirm against cuda_builtin_vars.h.
// NOTE(review): closing braces were missing in the garbled extraction and
// have been restored; the pattern is identical across all four operators.
__device__ inline __cuda_builtin_threadIdx_t::operator dim3() const {
  return dim3(x, y, z);
}
__device__ inline __cuda_builtin_blockIdx_t::operator dim3() const {
  return dim3(x, y, z);
}
__device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
  return dim3(x, y, z);
}
__device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
  return dim3(x, y, z);
}
// Alias dim3/uint3 to clang's builtin-variable types while
// curand_mtgp32_kernel.h is parsed, then restore the real names.
// (Presumably that header declares blockDim/threadIdx-typed objects using
// dim3/uint3 — confirm against the cuRAND headers.)
#pragma push_macro("dim3")
#pragma push_macro("uint3")
#define dim3 __cuda_builtin_blockDim_t
#define uint3 __cuda_builtin_threadIdx_t
#include "curand_mtgp32_kernel.h"
#pragma pop_macro("dim3")
#pragma pop_macro("uint3")
// Restore macros saved near the top of the wrapper (their push_macro
// counterparts appear much earlier in the file).
#pragma pop_macro("__USE_FAST_MATH__")
#pragma pop_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
// NOTE(review): truncated fragments. The "sharedMem = 0" default parameter
// looks like part of a kernel-launch configuration function signature, and
// the two __CUDA_BUILTIN_VAR lines look like declarations of the blockDim
// and gridDim builtin variables; the surrounding declarations were dropped
// by the extraction, so nothing here can be safely reconstructed.
515#if CUDA_VERSION >= 9020
517 size_t sharedMem = 0,
__CUDA_BUILTIN_VAR __cuda_builtin_blockDim_t blockDim
__CUDA_BUILTIN_VAR __cuda_builtin_gridDim_t gridDim
// NOTE(review): everything below is extraction residue, not coherent code
// from this file. Lines 199-217 are bare signatures of device math
// functions (rsqrt, normcdf, sincospi, erfcx, ...) with their bodies and
// terminators dropped — they resemble clang's device math-function wrapper
// header rather than this file. Lines 219-225 clearly come from unrelated
// headers: an ext_vector_type _Float16 typedef fragment, a comment about
// zeroing YMM registers (x86 AVX intrinsics), "vector float" signatures
// (PowerPC AltiVec), and an OpenCL-style printf declaration. None of this
// is reconstructible from the visible text; it should be removed or
// re-extracted from the pristine sources rather than repaired in place.
__DEVICE__ int __signbitd(double __a)
__DEVICE__ float rsqrtf(float __a)
__DEVICE__ double normcdf(double __a)
__DEVICE_VOID__ void sincospi(double __a, double *__s, double *__c)
__DEVICE__ float rcbrtf(float __a)
__DEVICE__ float erfcinvf(float __a)
__DEVICE__ float sinpif(float __a)
__DEVICE_VOID__ void sincospif(float __a, float *__s, float *__c)
__DEVICE__ double rcbrt(double __a)
__DEVICE__ float normcdff(float __a)
__DEVICE__ double cospi(double __a)
__DEVICE__ double sinpi(double __a)
__DEVICE__ float erfcxf(float __a)
__DEVICE__ float normcdfinvf(float __a)
__DEVICE__ double normcdfinv(double __a)
__DEVICE__ float cospif(float __a)
__DEVICE__ double erfcx(double __a)
__DEVICE__ double rsqrt(double __a)
__DEVICE__ double erfcinv(double __a)
#define __nv_tex_surf_handler(__op, __ptr,...)
_Float16 __2f16 __attribute__((ext_vector_type(2)))
Zeroes the upper 128 bits (bits 255:128) of all YMM registers.
static __inline__ vector float vector float vector float __c
static __inline__ vector float vector float __b
static __inline__ void int __a
__DEVICE__ bool signbit(float __x)
CLINKAGE int printf(__constant const char *st,...) __attribute__((format(printf