clang 23.0.0git
__clang_cuda_runtime_wrapper.h
Go to the documentation of this file.
1/*===---- __clang_cuda_runtime_wrapper.h - CUDA runtime support -------------===
2 *
3 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 * See https://llvm.org/LICENSE.txt for license information.
5 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 *
7 *===-----------------------------------------------------------------------===
8 */
9
10/*
11 * WARNING: This header is intended to be directly -include'd by
12 * the compiler and is not supposed to be included by users.
13 *
14 * CUDA headers are implemented in a way that currently makes it
15 * impossible for user code to #include directly when compiling with
16 * Clang. They present a different view of CUDA-supplied functions
17 * depending on where in NVCC's compilation pipeline the headers are
18 * included. Neither of these modes provides function definitions with
19 * correct attributes, so we use preprocessor to force the headers
20 * into a form that Clang can use.
21 *
22 * Similarly to NVCC which -include's cuda_runtime.h, Clang -include's
23 * this file during every CUDA compilation.
24 */
25
26#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__
27#define __CLANG_CUDA_RUNTIME_WRAPPER_H__
28
29#if defined(__CUDA__) && defined(__clang__)
30
31// Include some forward declares that must come before cmath.
33
34// Define __CUDACC__ early as libstdc++ standard headers with GNU extensions
35// enabled depend on it to avoid using __float128, which is unsupported in
36// CUDA.
37#define __CUDACC__
38
39// Include some standard headers to avoid CUDA headers including them
40// while some required macros (like __THROW) are in a weird state.
41#include <climits>
42#include <cmath>
43#include <cstdlib>
44#include <stdlib.h>
45#include <string.h>
46#undef __CUDACC__
47
48// math_functions.h from CUDA 13.2+ defines _NV_RSQRT_SPECIFIER.
49// Clang does not include it, so we need to define it ourselves.
50#if defined(__GNUC__) && defined(__GLIBC_PREREQ)
51#if __GLIBC_PREREQ(2, 42)
52#define _NV_RSQRT_SPECIFIER noexcept(true)
53#endif
54#endif
55
56#ifndef _NV_RSQRT_SPECIFIER
57#define _NV_RSQRT_SPECIFIER
58#endif
59
// Preserve common macros that will be changed below by us or by CUDA
// headers. The matching pop_macro()s near the end of this file restore the
// user's original definitions.
#pragma push_macro("__THROW")
#pragma push_macro("__CUDA_ARCH__")

// WARNING: Preprocessor hacks below are based on specific details of
// CUDA-7.x headers and are not expected to work with any other
// version of CUDA headers.
#include "cuda.h"
#if !defined(CUDA_VERSION)
#error "cuda.h did not define CUDA_VERSION"
#elif CUDA_VERSION < 7000
#error "Unsupported CUDA version!"
#endif

// Presumably gates compiler-internal declarations in CUDA-10+ headers; push
// the user's state so we can restore it at the end of the file.
#pragma push_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
#if CUDA_VERSION >= 10000
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#endif
79
80// Make largest subset of device functions available during host
81// compilation.
82#ifndef __CUDA_ARCH__
83#define __CUDA_ARCH__ 9999
84#endif
85
87
88// No need for device_launch_parameters.h as __clang_cuda_builtin_vars.h above
89// has taken care of builtin variables declared in the file.
90#define __DEVICE_LAUNCH_PARAMETERS_H__
91
92// {math,device}_functions.h only have declarations of the
93// functions. We don't need them as we're going to pull in their
94// definitions from .hpp files.
95#define __DEVICE_FUNCTIONS_H__
96#define __MATH_FUNCTIONS_H__
97#define __COMMON_FUNCTIONS_H__
98// device_functions_decls is replaced by __clang_cuda_device_functions.h
99// included below.
100#define __DEVICE_FUNCTIONS_DECLS_H__
101
// Masquerade as the appropriate stage of NVCC's pipeline so the CUDA headers
// expose the declarations we want: pre-9.0 headers key off __CUDABE__, 9.0+
// key off __CUDACC__/__CUDA_LIBDEVICE__.
#undef __CUDACC__
#if CUDA_VERSION < 9000
#define __CUDABE__
#else
#define __CUDACC__
#define __CUDA_LIBDEVICE__
#endif
// Disables definitions of device-side runtime support stubs in
// cuda_device_runtime_api.h
#include "host_defines.h"
#undef __CUDACC__
#include "driver_types.h"
#include "host_config.h"

// Temporarily replace "nv_weak" with weak, so __attribute__((nv_weak)) in
// cuda_device_runtime_api.h ends up being __attribute__((weak)) which is the
// functional equivalent of what we need.
#pragma push_macro("nv_weak")
#define nv_weak weak
#undef __CUDABE__
#undef __CUDA_LIBDEVICE__
#define __CUDACC__
#include "cuda_runtime.h"

#pragma pop_macro("nv_weak")
#undef __CUDACC__
#define __CUDABE__

// CUDA headers use __nvvm_memcpy and __nvvm_memset which Clang does
// not have at the moment. Emulate them with a builtin memcpy/memset.
// Note that the alignment argument 'a' is intentionally dropped.
#define __nvvm_memcpy(s, d, n, a) __builtin_memcpy(s, d, n)
#define __nvvm_memset(d, c, n, a) __builtin_memset(d, c, n)
134
// Pull in NVCC's host/device runtime support glue.
#if CUDA_VERSION < 9000
#include "crt/device_runtime.h"
#endif
#include "crt/host_runtime.h"
// device_runtime.h defines __cxa_* macros that will conflict with
// cxxabi.h.
// FIXME: redefine these as __device__ functions.
#undef __cxa_vec_ctor
#undef __cxa_vec_cctor
#undef __cxa_vec_dtor
#undef __cxa_vec_new
#undef __cxa_vec_new2
#undef __cxa_vec_new3
#undef __cxa_vec_delete2
#undef __cxa_vec_delete
#undef __cxa_vec_delete3
#undef __cxa_pure_virtual
152
// On MacOS, math_functions.hpp assumes a host-side __signbitd() exists, but
// our include gymnastics above keep the system definition from appearing.
// Supply an equivalent one: 1.0 when the sign bit of x is set, 0.0 otherwise.
#ifdef __APPLE__
inline __host__ double __signbitd(double x) {
  const bool __negative = std::signbit(x);
  return __negative ? 1.0 : 0.0;
}
#endif
161
162// CUDA 9.1 no longer provides declarations for libdevice functions, so we need
163// to provide our own.
165
166// Wrappers for many device-side standard library functions, incl. math
167// functions, became compiler builtins in CUDA-9 and have been removed from the
168// CUDA headers. Clang now provides its own implementation of the wrappers.
169#if CUDA_VERSION >= 9000
171#include <__clang_cuda_math.h>
172#endif
173
174// __THROW is redefined to be empty by device_functions_decls.h in CUDA. Clang's
175// counterpart does not do it, so we need to make it empty here to keep
176// following CUDA includes happy.
177#undef __THROW
178#define __THROW
179
180// CUDA 8.0.41 relies on __USE_FAST_MATH__ and __CUDA_PREC_DIV's values.
181// Previous versions used to check whether they are defined or not.
182// CU_DEVICE_INVALID macro is only defined in 8.0.41, so we use it
183// here to detect the switch.
184
185#if defined(CU_DEVICE_INVALID)
186#if !defined(__USE_FAST_MATH__)
187#define __USE_FAST_MATH__ 0
188#endif
189
190#if !defined(__CUDA_PREC_DIV)
191#define __CUDA_PREC_DIV 0
192#endif
193#endif
194
// Temporarily poison __host__ macro to ensure it's not used by any of
// the headers we're about to include.
#pragma push_macro("__host__")
#define __host__ UNEXPECTED_HOST_ATTRIBUTE

// device_functions.hpp and math_functions*.hpp use 'static
// __forceinline__' (with no __device__) for definitions of device
// functions. Temporarily redefine __forceinline__ to include
// __device__.
#pragma push_macro("__forceinline__")
#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
#if CUDA_VERSION < 9000
#include "device_functions.hpp"
#endif

// math_function.hpp uses the __USE_FAST_MATH__ macro to determine whether we
// get the slow-but-accurate or fast-but-inaccurate versions of functions like
// sin and exp. This is controlled in clang by -fgpu-approx-transcendentals.
//
// device_functions.hpp uses __USE_FAST_MATH__ for a different purpose (fast vs.
// slow divides), so we need to scope our define carefully here.
#pragma push_macro("__USE_FAST_MATH__")
#if defined(__CLANG_GPU_APPROX_TRANSCENDENTALS__)
#define __USE_FAST_MATH__ 1
#endif

#if CUDA_VERSION >= 9000
#include "crt/math_functions.hpp"
#else
#include "math_functions.hpp"
#endif

#pragma pop_macro("__USE_FAST_MATH__")

#if CUDA_VERSION < 9000
#include "math_functions_dbl_ptx3.hpp"
#endif
#pragma pop_macro("__forceinline__")

// Pull in host-only functions that are only available when neither
// __CUDACC__ nor __CUDABE__ are defined.
// Dropping the header guard lets math_functions.hpp be included a second
// time, now in host mode.
#undef __MATH_FUNCTIONS_HPP__
#undef __CUDABE__
#if CUDA_VERSION < 9000
#include "math_functions.hpp"
#endif
241// Alas, additional overloads for these functions are hard to get to.
242// Considering that we only need these overloads for a few functions,
243// we can provide them here.
244static inline float rsqrt(float __a) { return rsqrtf(__a); }
245static inline float rcbrt(float __a) { return rcbrtf(__a); }
246static inline float sinpi(float __a) { return sinpif(__a); }
247static inline float cospi(float __a) { return cospif(__a); }
248static inline void sincospi(float __a, float *__b, float *__c) {
249 return sincospif(__a, __b, __c);
250}
251static inline float erfcinv(float __a) { return erfcinvf(__a); }
252static inline float normcdfinv(float __a) { return normcdfinvf(__a); }
253static inline float normcdf(float __a) { return normcdff(__a); }
254static inline float erfcx(float __a) { return erfcxf(__a); }
255
#if CUDA_VERSION < 9000
// For some reason single-argument variant is not always declared by
// CUDA headers. Alas, device_functions.hpp included below needs it.
// The int argument is ignored; this just forwards to the zero-argument
// __brkpt() overload declared by the CUDA headers.
static inline __device__ void __brkpt(int __c) { __brkpt(); }
#endif
261
// Now include *.hpp with definitions of various GPU functions. Alas,
// a lot of things get declared/defined with __host__ attribute which
// we don't want and we have to define it out. We also have to include
// {device,math}_functions.hpp again in order to extract the other
// branch of #if/else inside.
#define __host__
#undef __CUDABE__
#define __CUDACC__
#if CUDA_VERSION >= 9000
// Some atomic functions became compiler builtins in CUDA-9, so we need their
// declarations.
#include "device_atomic_functions.h"
#endif
// Drop the guard so device_atomic_functions.hpp can be (re-)included with the
// macro state set up above.
#undef __DEVICE_FUNCTIONS_HPP__
#include "device_atomic_functions.hpp"
#if CUDA_VERSION >= 9000
#include "crt/device_functions.hpp"
#include "crt/device_double_functions.hpp"
#else
#include "device_functions.hpp"
#define __CUDABE__
#include "device_double_functions.h"
#undef __CUDABE__
#endif
#include "sm_20_atomic_functions.hpp"
// Predicate functions used in `__builtin_assume` need to have no side effect.
// However, sm_20_intrinsics.hpp doesn't define them with either the pure or
// the const attribute. Rename the definitions from sm_20_intrinsics.hpp out
// of the way and re-define them as const ones below.
#pragma push_macro("__isGlobal")
#pragma push_macro("__isShared")
#pragma push_macro("__isConstant")
#pragma push_macro("__isLocal")
#define __isGlobal __ignored_cuda___isGlobal
#define __isShared __ignored_cuda___isShared
#define __isConstant __ignored_cuda___isConstant
#define __isLocal __ignored_cuda___isLocal
#include "sm_20_intrinsics.hpp"
#pragma pop_macro("__isGlobal")
#pragma pop_macro("__isShared")
#pragma pop_macro("__isConstant")
#pragma pop_macro("__isLocal")
// Re-implement the predicates on top of clang's __nvvm_isspacep_* builtins,
// this time marked __attribute__((const)) so __builtin_assume accepts them.
#pragma push_macro("__DEVICE__")
#define __DEVICE__ static __device__ __forceinline__ __attribute__((const))
__DEVICE__ unsigned int __isGlobal(const void *p) {
  return __nvvm_isspacep_global(p);
}
__DEVICE__ unsigned int __isShared(const void *p) {
  return __nvvm_isspacep_shared(p);
}
__DEVICE__ unsigned int __isConstant(const void *p) {
  return __nvvm_isspacep_const(p);
}
__DEVICE__ unsigned int __isLocal(const void *p) {
  return __nvvm_isspacep_local(p);
}
#pragma pop_macro("__DEVICE__")
#include "sm_32_atomic_functions.hpp"

// Don't include sm_30_intrinsics.h and sm_32_intrinsics.h. These define the
// __shfl and __ldg intrinsics using inline (volatile) asm, but we want to
// define them using builtins so that the optimizer can reason about and across
// these instructions. In particular, using intrinsics for ldg gets us the
// [addr+imm] addressing mode, which, although it doesn't actually exist in the
// hardware, seems to generate faster machine code because ptxas can more easily
// reason about our code.

#if CUDA_VERSION >= 8000
// Temporarily hide __CUDA_ARCH__ (set to 9999 above), presumably because
// these headers gate their declarations on it -- the pop below restores it.
#pragma push_macro("__CUDA_ARCH__")
#undef __CUDA_ARCH__
#include "sm_60_atomic_functions.hpp"
#include "sm_61_intrinsics.hpp"
#pragma pop_macro("__CUDA_ARCH__")
#endif
336
337#undef __MATH_FUNCTIONS_HPP__
338
339// math_functions.hpp defines ::signbit as a __host__ __device__ function. This
340// conflicts with libstdc++'s constexpr ::signbit, so we have to rename
341// math_function.hpp's ::signbit. It's guarded by #undef signbit, but that's
342// conditional on __GNUC__. :)
343#pragma push_macro("signbit")
344#pragma push_macro("__GNUC__")
345#undef __GNUC__
346#define signbit __ignored_cuda_signbit
347
348// CUDA-9 omits device-side definitions of some math functions if it sees
349// include guard from math.h wrapper from libstdc++. We have to undo the header
350// guard temporarily to get the definitions we need.
351#pragma push_macro("_GLIBCXX_MATH_H")
352#pragma push_macro("_LIBCPP_VERSION")
353#if CUDA_VERSION >= 9000
354#undef _GLIBCXX_MATH_H
355// We also need to undo another guard that checks for libc++ 3.8+
356#ifdef _LIBCPP_VERSION
357#define _LIBCPP_VERSION 3700
358#endif
359#endif
360
361#if CUDA_VERSION >= 9000
362#include "crt/math_functions.hpp"
363#else
364#include "math_functions.hpp"
365#endif
366#pragma pop_macro("_GLIBCXX_MATH_H")
367#pragma pop_macro("_LIBCPP_VERSION")
368#pragma pop_macro("__GNUC__")
369#pragma pop_macro("signbit")
370
371#pragma pop_macro("__host__")
372
373// __clang_cuda_texture_intrinsics.h must be included first in order to provide
374// implementation for __nv_tex_surf_handler that CUDA's headers depend on.
375// The implementation requires c++11 and only works with CUDA-9 or newer.
376#if __cplusplus >= 201103L && CUDA_VERSION >= 9000
377// clang-format off
379// clang-format on
380#else
381#if CUDA_VERSION >= 9000
382// Provide a hint that texture support needs C++11.
383template <typename T> struct __nv_tex_needs_cxx11 {
384 const static bool value = false;
385};
386template <class T>
387__host__ __device__ void __nv_tex_surf_handler(const char *name, T *ptr,
388 cudaTextureObject_t obj,
389 float x) {
390 _Static_assert(__nv_tex_needs_cxx11<T>::value,
391 "Texture support requires C++11");
392}
393#else
394// Textures in CUDA-8 and older are not supported by clang.There's no
395// convenient way to intercept texture use in these versions, so we can't
396// produce a meaningful error. The source code that attempts to use textures
397// will continue to fail as it does now.
398#endif // CUDA_VERSION
399#endif // __cplusplus >= 201103L && CUDA_VERSION >= 9000
400#include "surface_indirect_functions.h"
401#if CUDA_VERSION < 13000
402// Direct texture fetch functions had been deprecated since CUDA-11.
403// The file in CUDA-12 only carried unused texture types, and is no longer
404// needed.
405#include "texture_fetch_functions.h"
406#endif // CUDA_VERSION < 13000
407#include "texture_indirect_functions.h"
408
// Restore state of __CUDA_ARCH__ and __THROW we had on entry.
#pragma pop_macro("__CUDA_ARCH__")
#pragma pop_macro("__THROW")

// Set up compiler macros expected to be seen during compilation.
#undef __CUDABE__
#define __CUDACC__

extern "C" {
// Device-side CUDA system calls.
// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls
// We need these declarations and wrappers for device-side
// malloc/free/printf calls to work without relying on
// -fcuda-disable-target-call-checks option.
__device__ int vprintf(const char *, const char *);
__device__ void free(void *) __attribute((nothrow));
__device__ void *malloc(size_t) __attribute((nothrow)) __attribute__((malloc));

// __assertfail() used to have a `noreturn` attribute. Unfortunately that
// contributed to triggering the longstanding bug in ptxas when assert was used
// in sufficiently convoluted code. See
// https://bugs.llvm.org/show_bug.cgi?id=27738 for the details.
__device__ void __assertfail(const char *__message, const char *__file,
                             unsigned __line, const char *__function,
                             size_t __charSize);

// In order for standard assert() macro on linux to work we need to
// provide device-side __assert_fail(). It forwards to __assertfail(),
// passing sizeof(char) as the character size.
__device__ static inline void __assert_fail(const char *__message,
                                            const char *__file, unsigned __line,
                                            const char *__function) {
  __assertfail(__message, __file, __line, __function, sizeof(char));
}

// Clang will convert printf into vprintf, but we still need
// device-side declaration for it.
__device__ int printf(const char *, ...);
} // extern "C"
447
448// We also need device-side std::malloc and std::free.
449namespace std {
450__device__ static inline void free(void *__ptr) { ::free(__ptr); }
451__device__ static inline void *malloc(size_t __size) {
452 return ::malloc(__size);
453}
454} // namespace std
455
// Out-of-line implementations from __clang_cuda_builtin_vars.h. These need to
// come after we've pulled in the definition of uint3 and dim3.
// Each builtin variable gets a pair of conversion operators so it can be used
// where a plain dim3 or uint3 is expected.

__device__ inline __cuda_builtin_threadIdx_t::operator dim3() const {
  return dim3(x, y, z);
}

__device__ inline __cuda_builtin_threadIdx_t::operator uint3() const {
  return {x, y, z};
}

__device__ inline __cuda_builtin_blockIdx_t::operator dim3() const {
  return dim3(x, y, z);
}

__device__ inline __cuda_builtin_blockIdx_t::operator uint3() const {
  return {x, y, z};
}

__device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
  return dim3(x, y, z);
}

__device__ inline __cuda_builtin_blockDim_t::operator uint3() const {
  return {x, y, z};
}

__device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
  return dim3(x, y, z);
}

__device__ inline __cuda_builtin_gridDim_t::operator uint3() const {
  return {x, y, z};
}
490
491#include <__clang_cuda_cmath.h>
494
495// curand_mtgp32_kernel helpfully redeclares blockDim and threadIdx in host
496// mode, giving them their "proper" types of dim3 and uint3. This is
497// incompatible with the types we give in __clang_cuda_builtin_vars.h. As as
498// hack, force-include the header (nvcc doesn't include it by default) but
499// redefine dim3 and uint3 to our builtin types. (Thankfully dim3 and uint3 are
500// only used here for the redeclarations of blockDim and threadIdx.)
501#pragma push_macro("dim3")
502#pragma push_macro("uint3")
503#define dim3 __cuda_builtin_blockDim_t
504#define uint3 __cuda_builtin_threadIdx_t
505#include "curand_mtgp32_kernel.h"
506#pragma pop_macro("dim3")
507#pragma pop_macro("uint3")
508#pragma pop_macro("__USE_FAST_MATH__")
509#pragma pop_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
510
511// CUDA runtime uses this undocumented function to access kernel launch
512// configuration. The declaration is in crt/device_functions.h but that file
513// includes a lot of other stuff we don't want. Instead, we'll provide our own
514// declaration for it here.
515#if CUDA_VERSION >= 9020
516extern "C" unsigned __cudaPushCallConfiguration(dim3 gridDim, dim3 blockDim,
517 size_t sharedMem = 0,
518 void *stream = 0);
519#endif
520
521#endif // __CUDA__
522#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__
__CUDA_BUILTIN_VAR __cuda_builtin_blockDim_t blockDim
__CUDA_BUILTIN_VAR __cuda_builtin_gridDim_t gridDim
#define __DEVICE__
__DEVICE__ int __signbitd(double __a)
__DEVICE__ float rsqrtf(float __a)
__DEVICE__ double normcdf(double __a)
__DEVICE_VOID__ void sincospi(double __a, double *__s, double *__c)
__DEVICE__ float rcbrtf(float __a)
__DEVICE__ float erfcinvf(float __a)
__DEVICE__ float sinpif(float __a)
__DEVICE_VOID__ void sincospif(float __a, float *__s, float *__c)
__DEVICE__ double rcbrt(double __a)
__DEVICE__ float normcdff(float __a)
__DEVICE__ double cospi(double __a)
__DEVICE__ double sinpi(double __a)
__DEVICE__ float erfcxf(float __a)
__DEVICE__ float normcdfinvf(float __a)
__DEVICE__ double normcdfinv(double __a)
__DEVICE__ float cospif(float __a)
__DEVICE__ double erfcx(double __a)
__DEVICE__ double rsqrt(double __a)
__DEVICE__ double erfcinv(double __a)
#define __nv_tex_surf_handler(__op, __ptr,...)
_Float16 __2f16 __attribute__((ext_vector_type(2)))
Zeroes the upper 128 bits (bits 255:128) of all YMM registers.
#define __host__
#define __device__
struct dim3 dim3
static __inline__ vector float vector float vector float __c
Definition altivec.h:4800
static __inline__ vector float vector float __b
Definition altivec.h:578
static __inline__ void int __a
Definition emmintrin.h:4077
vector< uint, 3 > uint3
__DEVICE__ bool signbit(float __x)
CLINKAGE int printf(__constant const char *st,...) __attribute__((format(printf