clang 23.0.0git
SPIR.cpp
Go to the documentation of this file.
1//===- SPIR.cpp -----------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ABIInfoImpl.h"
11#include "TargetInfo.h"
12#include "clang/AST/DeclCXX.h"
14#include "llvm/IR/DerivedTypes.h"
15
16#include <stdint.h>
17#include <utility>
18
19using namespace clang;
20using namespace clang::CodeGen;
21
22//===----------------------------------------------------------------------===//
23// Base ABI and target codegen info implementation common between SPIR and
24// SPIR-V.
25//===----------------------------------------------------------------------===//
26
27namespace {
// ABI info shared between SPIR and SPIR-V: DefaultABIInfo plus the SPIR
// runtime calling convention (see setCCs below).
class CommonSPIRABIInfo : public DefaultABIInfo {
public:
  CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }

private:
  // Switches the runtime calling convention from C to SPIR_FUNC.
  void setCCs();
};
35
// ABI info for SPIR-V targets; extends the common SPIR handling with
// kernel-argument classification, va_arg lowering, and vector memory-type
// selection.
class SPIRVABIInfo : public CommonSPIRABIInfo {
public:
  SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
  void computeInfo(CGFunctionInfo &FI) const override;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  llvm::FixedVectorType *
  getOptimalVectorMemoryType(llvm::FixedVectorType *Ty,
                             const LangOptions &LangOpt) const override;

private:
  // Classification used for arguments of SPIR_KERNEL functions only.
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
};
50
// ABI info used when the SPIR-V triple's vendor is AMD; mirrors the AMDGPU
// register-budgeted argument classification.
class AMDGCNSPIRVABIInfo : public SPIRVABIInfo {
  // TODO: this should be unified / shared with AMDGPU, ideally we'd like to
  // re-use AMDGPUABIInfo eventually, rather than duplicate.
  static constexpr unsigned MaxNumRegsForArgsRet = 16; // 16 32-bit registers
  // Running budget consumed by classifyArgumentType; reset to
  // MaxNumRegsForArgsRet at the start of each computeInfo, hence mutable.
  mutable unsigned NumRegsLeft = 0;

  // Estimated number of 32-bit registers a value of Ty occupies.
  uint64_t numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return true;
  }
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override {
    uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

    // Homogeneous Aggregates may occupy at most 16 registers.
    return Members * NumRegs <= MaxNumRegsForArgsRet;
  }

  // Coerce HIP scalar pointer arguments from generic pointers to global ones.
  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

public:
  AMDGCNSPIRVABIInfo(CodeGenTypes &CGT) : SPIRVABIInfo(CGT) {}
  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::FixedVectorType *
  getOptimalVectorMemoryType(llvm::FixedVectorType *Ty,
                             const LangOptions &LangOpt) const override;
};
86} // end anonymous namespace
87namespace {
88class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
89public:
90 CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
91 : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
92 CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
93 : TargetCodeGenInfo(std::move(ABIInfo)) {}
94
95 LangAS getASTAllocaAddressSpace() const override {
97 getABIInfo().getDataLayout().getAllocaAddrSpace());
98 }
99
100 unsigned getDeviceKernelCallingConv() const override;
101 llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override;
102 llvm::Type *getHLSLType(CodeGenModule &CGM, const Type *Ty,
103 const CGHLSLOffsetInfo &OffsetInfo) const override;
104
105 llvm::Type *getHLSLPadding(CodeGenModule &CGM,
106 CharUnits NumBytes) const override {
107 unsigned Size = NumBytes.getQuantity();
108 return llvm::TargetExtType::get(CGM.getLLVMContext(), "spirv.Padding", {},
109 {Size});
110 }
111
112 bool isHLSLPadding(llvm::Type *Ty) const override {
113 if (auto *TET = dyn_cast<llvm::TargetExtType>(Ty))
114 return TET->getName() == "spirv.Padding";
115 return false;
116 }
117
118 llvm::Type *getSPIRVImageTypeFromHLSLResource(
119 const HLSLAttributedResourceType::Attributes &attributes,
120 QualType SampledType, CodeGenModule &CGM) const;
121 void
122 setOCLKernelStubCallingConvention(const FunctionType *&FT) const override;
123 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
124 llvm::PointerType *T,
125 QualType QT) const override;
126};
// Target codegen info for SPIR-V triples; picks the AMDGCN-flavoured ABI
// when the triple vendor is AMD, the plain SPIR-V ABI otherwise.
class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
public:
  SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : CommonSPIRTargetCodeGenInfo(
            (CGT.getTarget().getTriple().getVendor() == llvm::Triple::AMD)
                ? std::make_unique<AMDGCNSPIRVABIInfo>(CGT)
                : std::make_unique<SPIRVABIInfo>(CGT)) {}
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  StringRef getLLVMSyncScopeStr(const LangOptions &LangOpts, SyncScope Scope,
                                llvm::AtomicOrdering Ordering) const override;
  void setTargetAtomicMetadata(CodeGenFunction &CGF,
                               llvm::Instruction &AtomicInst,
                               const AtomicExpr *Expr = nullptr) const override;
  // Library calls are not supported for AMD-vendored SPIR-V.
  bool supportsLibCall() const override {
    return getABIInfo().getTarget().getTriple().getVendor() !=
           llvm::Triple::AMD;
  }

  LangAS getSRetAddrSpace(const CXXRecordDecl *RD) const override;
};
151} // End anonymous namespace.
152
// Replace the default C runtime calling convention with SPIR_FUNC, the
// convention SPIR(-V) runtime helper calls must use.
void CommonSPIRABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);
  RuntimeCC = llvm::CallingConv::SPIR_FUNC;
}
157
158ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
159 // Coerce pointer arguments with default address space to CrossWorkGroup
160 // pointers as default address space kernel
161 // arguments are not allowed. We use the opencl_global language address
162 // space which always maps to CrossWorkGroup.
163 llvm::Type *LTy = CGT.ConvertType(Ty);
164 auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
165 auto GlobalAS = getContext().getTargetAddressSpace(LangAS::opencl_global);
166 auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
167 if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
168 LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
169 return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
170 }
171
172 if (getContext().getLangOpts().isTargetDevice() &&
174 // Force copying aggregate type in kernel arguments by value when
175 // compiling CUDA targeting SPIR-V. This is required for the object
176 // copied to be valid on the device.
177 // This behavior follows the CUDA spec
178 // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing,
179 // and matches the NVPTX implementation. TODO: hardcoding to 0 should be
180 // revisited if HIPSPV / byval starts making use of the AS of an indirect
181 // arg.
182 return getNaturalAlignIndirect(Ty, /*AddrSpace=*/0, /*byval=*/true);
183 }
184 return classifyArgumentType(Ty);
185}
186
187void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
188 // The logic is same as in DefaultABIInfo with an exception on the kernel
189 // arguments handling.
190 llvm::CallingConv::ID CC = FI.getCallingConvention();
191
192 for (auto &&[ArgumentsCount, I] : llvm::enumerate(FI.arguments()))
193 I.info = ArgumentsCount < FI.getNumRequiredArgs()
194 ? classifyArgumentType(I.type)
195 : ABIArgInfo::getDirect();
196
197 if (!getCXXABI().classifyReturnType(FI))
199
200 for (auto &I : FI.arguments()) {
201 if (CC == llvm::CallingConv::SPIR_KERNEL) {
202 I.info = classifyKernelArgumentType(I.type);
203 } else {
204 I.info = classifyArgumentType(I.type);
205 }
206 }
207}
208
209RValue SPIRVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
210 QualType Ty, AggValueSlot Slot) const {
211 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
212 getContext().getTypeInfoInChars(Ty),
214 /*AllowHigherAlign=*/true, Slot);
215}
216
217uint64_t AMDGCNSPIRVABIInfo::numRegsForType(QualType Ty) const {
218 // This duplicates the AMDGPUABI computation.
219 uint64_t NumRegs = 0;
220
221 if (const VectorType *VT = Ty->getAs<VectorType>()) {
222 // Compute from the number of elements. The reported size is based on the
223 // in-memory size, which includes the padding 4th element for 3-vectors.
224 QualType EltTy = VT->getElementType();
225 uint64_t EltSize = getContext().getTypeSize(EltTy);
226
227 // 16-bit element vectors should be passed as packed.
228 if (EltSize == 16)
229 return (VT->getNumElements() + 1) / 2;
230
231 uint64_t EltNumRegs = (EltSize + 31) / 32;
232 return EltNumRegs * VT->getNumElements();
233 }
234
235 if (const auto *RD = Ty->getAsRecordDecl()) {
236 assert(!RD->hasFlexibleArrayMember());
237
238 for (const FieldDecl *Field : RD->fields()) {
239 QualType FieldTy = Field->getType();
240 NumRegs += numRegsForType(FieldTy);
241 }
242
243 return NumRegs;
244 }
245
246 return (getContext().getTypeSize(Ty) + 31) / 32;
247}
248
249llvm::Type *AMDGCNSPIRVABIInfo::coerceKernelArgumentType(llvm::Type *Ty,
250 unsigned FromAS,
251 unsigned ToAS) const {
252 // Single value types.
253 auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
254 if (PtrTy && PtrTy->getAddressSpace() == FromAS)
255 return llvm::PointerType::get(Ty->getContext(), ToAS);
256 return Ty;
257}
258
259ABIArgInfo AMDGCNSPIRVABIInfo::classifyReturnType(QualType RetTy) const {
260 if (!isAggregateTypeForABI(RetTy) || getRecordArgABI(RetTy, getCXXABI()))
262
263 // Ignore empty structs/unions.
264 if (isEmptyRecord(getContext(), RetTy, true))
265 return ABIArgInfo::getIgnore();
266
267 // Lower single-element structs to just return a regular value.
268 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
269 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
270
271 if (const auto *RD = RetTy->getAsRecordDecl();
272 RD && RD->hasFlexibleArrayMember())
274
275 // Pack aggregates <= 4 bytes into single VGPR or pair.
276 uint64_t Size = getContext().getTypeSize(RetTy);
277 if (Size <= 16)
278 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
279
280 if (Size <= 32)
281 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
282
283 // TODO: This carried over from AMDGPU oddity, we retain it to
284 // ensure consistency, but it might be reasonable to return Int64.
285 if (Size <= 64) {
286 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
287 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
288 }
289
290 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
291 return ABIArgInfo::getDirect();
293}
294
295/// For kernels all parameters are really passed in a special buffer. It doesn't
296/// make sense to pass anything byval, so everything must be direct.
297ABIArgInfo AMDGCNSPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
299
300 // TODO: Can we omit empty structs?
301
302 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
303 Ty = QualType(SeltTy, 0);
304
305 llvm::Type *OrigLTy = CGT.ConvertType(Ty);
306 llvm::Type *LTy = OrigLTy;
307 if (getContext().getLangOpts().isTargetDevice()) {
308 LTy = coerceKernelArgumentType(
309 OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
310 /*ToAS=*/getContext().getTargetAddressSpace(LangAS::opencl_global));
311 }
312
313 // FIXME: This doesn't apply the optimization of coercing pointers in structs
314 // to global address space when using byref. This would require implementing a
315 // new kind of coercion of the in-memory type when for indirect arguments.
316 if (LTy == OrigLTy && isAggregateTypeForABI(Ty)) {
318 getContext().getTypeAlignInChars(Ty),
319 getContext().getTargetAddressSpace(LangAS::opencl_constant),
320 false /*Realign*/, nullptr /*Padding*/);
321 }
322
323 // TODO: inhibiting flattening is an AMDGPU workaround for Clover, which might
324 // be vestigial and should be revisited.
325 return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
326}
327
328ABIArgInfo AMDGCNSPIRVABIInfo::classifyArgumentType(QualType Ty) const {
329 assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
330
332
333 // TODO: support for variadics.
334
335 if (!isAggregateTypeForABI(Ty)) {
336 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
337 if (!ArgInfo.isIndirect()) {
338 uint64_t NumRegs = numRegsForType(Ty);
339 NumRegsLeft -= std::min(NumRegs, uint64_t{NumRegsLeft});
340 }
341
342 return ArgInfo;
343 }
344
345 // Records with non-trivial destructors/copy-constructors should not be
346 // passed by value.
347 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
348 return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
350
351 // Ignore empty structs/unions.
352 if (isEmptyRecord(getContext(), Ty, true))
353 return ABIArgInfo::getIgnore();
354
355 // Lower single-element structs to just pass a regular value. TODO: We
356 // could do reasonable-size multiple-element structs too, using getExpand(),
357 // though watch out for things like bitfields.
358 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
359 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
360
361 if (const auto *RD = Ty->getAsRecordDecl();
362 RD && RD->hasFlexibleArrayMember())
364
365 uint64_t Size = getContext().getTypeSize(Ty);
366 if (Size <= 64) {
367 // Pack aggregates <= 8 bytes into single VGPR or pair.
368 unsigned NumRegs = (Size + 31) / 32;
369 NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
370
371 if (Size <= 16)
372 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
373
374 if (Size <= 32)
375 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
376
377 // TODO: This is an AMDGPU oddity, and might be vestigial, we retain it to
378 // ensure consistency, but it should be revisited.
379 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
380 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
381 }
382
383 if (NumRegsLeft > 0) {
384 uint64_t NumRegs = numRegsForType(Ty);
385 if (NumRegsLeft >= NumRegs) {
386 NumRegsLeft -= NumRegs;
387 return ABIArgInfo::getDirect();
388 }
389 }
390
391 // Use pass-by-reference in stead of pass-by-value for struct arguments in
392 // function ABI.
394 getContext().getTypeAlignInChars(Ty),
395 getContext().getTargetAddressSpace(LangAS::opencl_private));
396}
397
398void AMDGCNSPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
399 llvm::CallingConv::ID CC = FI.getCallingConvention();
400
401 if (!getCXXABI().classifyReturnType(FI))
403
404 NumRegsLeft = MaxNumRegsForArgsRet;
405 for (auto &I : FI.arguments()) {
406 if (CC == llvm::CallingConv::SPIR_KERNEL)
407 I.info = classifyKernelArgumentType(I.type);
408 else
409 I.info = classifyArgumentType(I.type);
410 }
411}
412
413llvm::FixedVectorType *
414SPIRVABIInfo::getOptimalVectorMemoryType(llvm::FixedVectorType *Ty,
415 const LangOptions &LangOpt) const {
416 // For Logical SPIR-V, we don't know the underlying hardware or layout.
417 // This means we don't know which vector size is better, and also cannot
418 // assume a smaller vector size is stored in a larger vector size.
419 if (getTarget().getTriple().isSPIRVLogical())
420 return Ty;
421 return DefaultABIInfo::getOptimalVectorMemoryType(Ty, LangOpt);
422}
423
424llvm::FixedVectorType *AMDGCNSPIRVABIInfo::getOptimalVectorMemoryType(
425 llvm::FixedVectorType *Ty, const LangOptions &LangOpt) const {
426 // AMDGPU has legal instructions for 96-bit so 3x32 can be supported.
427 if (Ty->getNumElements() == 3 && getDataLayout().getTypeSizeInBits(Ty) == 96)
428 return Ty;
429 return DefaultABIInfo::getOptimalVectorMemoryType(Ty, LangOpt);
430}
431
432namespace clang {
433namespace CodeGen {
435 if (CGM.getTarget().getTriple().isSPIRV()) {
436 if (CGM.getTarget().getTriple().getVendor() == llvm::Triple::AMD)
437 AMDGCNSPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
438 else
439 SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
440 } else {
441 CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
442 }
443}
444}
445}
446
// Device kernels use the SPIR_KERNEL calling convention on SPIR(-V).
unsigned CommonSPIRTargetCodeGenInfo::getDeviceKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}
450
451LangAS SPIRVTargetCodeGenInfo::getSRetAddrSpace(const CXXRecordDecl *RD) const {
452 // Types with no viable copy/move must be constructed in-place, use the
453 // default AS so the sret pointer matches the "this" convention.
454 if (RD && !RD->canPassInRegisters())
455 return LangAS::Default;
456 return getASTAllocaAddressSpace();
457}
458
459void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
460 const FunctionType *&FT) const {
461 // Convert HIP kernels to SPIR-V kernels.
462 if (getABIInfo().getContext().getLangOpts().HIP) {
463 FT = getABIInfo().getContext().adjustFunctionType(
465 return;
466 }
467}
468
469void CommonSPIRTargetCodeGenInfo::setOCLKernelStubCallingConvention(
470 const FunctionType *&FT) const {
471 FT = getABIInfo().getContext().adjustFunctionType(
473}
474
475// LLVM currently assumes a null pointer has the bit pattern 0, but some GPU
476// targets use a non-zero encoding for null in certain address spaces.
477// Because SPIR(-V) is a generic target and the bit pattern of null in
478// non-generic AS is unspecified, materialize null in non-generic AS via an
479// addrspacecast from null in generic AS. This allows later lowering to
480// substitute the target's real sentinel value.
481llvm::Constant *
482CommonSPIRTargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
483 llvm::PointerType *PT,
484 QualType QT) const {
486 ? LangAS::Default
488 unsigned ASAsInt = static_cast<unsigned>(AS);
489 unsigned FirstTargetASAsInt =
490 static_cast<unsigned>(LangAS::FirstTargetAddressSpace);
491 unsigned CodeSectionINTELAS = FirstTargetASAsInt + 9;
492 // As per SPV_INTEL_function_pointers, it is illegal to addrspacecast
493 // function pointers to/from the generic AS.
494 bool IsFunctionPtrAS =
495 CGM.getTriple().isSPIRV() && ASAsInt == CodeSectionINTELAS;
496 if (AS == LangAS::Default || AS == LangAS::opencl_generic ||
497 AS == LangAS::opencl_constant || IsFunctionPtrAS)
498 return llvm::ConstantPointerNull::get(PT);
499
500 auto &Ctx = CGM.getContext();
501 auto NPT = llvm::PointerType::get(
502 PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
503 return llvm::ConstantExpr::getAddrSpaceCast(
504 llvm::ConstantPointerNull::get(NPT), PT);
505}
506
LangAS
SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                 const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  // If we're here it means that we're using the SPIRDefIsGen ASMap, hence for
  // the global AS we can rely on either cuda_device or sycl_global to be
  // correct; however, since this is not a CUDA Device context, we use
  // sycl_global to prevent confusion with the assertion.
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::sycl_global));
  // No declaration to inspect: use the default global AS.
  if (!D)
    return DefaultGlobalAS;

  // An explicit address space on the declaration takes precedence.
  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  return DefaultGlobalAS;
}
528
// Attach kernel metadata to HIP kernel definitions when targeting
// AMD-vendored SPIR-V.
void SPIRVTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  // Only definitions carry this metadata.
  if (GV->isDeclaration())
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  assert(F && "Expected GlobalValue to be a Function");

  // Only applies to HIP compilations for AMD-vendored SPIR-V.
  if (!M.getLangOpts().HIP ||
      M.getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return;

  // Only __global__ (kernel) functions are annotated.
  if (!FD->hasAttr<CUDAGlobalAttr>())
    return;

  // Start from the language default and let an explicit
  // amdgpu_flat_work_group_size maximum override it.
  unsigned N = M.getLangOpts().GPUMaxThreadsPerBlock;
  if (auto FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>())
    N = FlatWGS->getMax()->EvaluateKnownConstInt(M.getContext()).getExtValue();

  // We encode the maximum flat WG size in the first component of the 3D
  // max_work_group_size attribute, which will get reverse translated into the
  // original AMDGPU attribute when targeting AMDGPU.
  auto Int32Ty = llvm::IntegerType::getInt32Ty(M.getLLVMContext());
  llvm::Metadata *AttrMDArgs[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, N)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1))};

  F->setMetadata("max_work_group_size",
                 llvm::MDNode::get(M.getLLVMContext(), AttrMDArgs));
}
564
// Map a Clang SyncScope to the LLVM sync-scope string used for SPIR-V
// atomics; the empty string denotes the widest (system) scope.
StringRef SPIRVTargetCodeGenInfo::getLLVMSyncScopeStr(
    const LangOptions &, SyncScope Scope, llvm::AtomicOrdering) const {
  switch (Scope) {
  case SyncScope::HIPSingleThread:
  case SyncScope::SingleScope:
    return "singlethread";
  case SyncScope::HIPWavefront:
  case SyncScope::OpenCLSubGroup:
  case SyncScope::WavefrontScope:
    return "subgroup";
  case SyncScope::HIPCluster:
  case SyncScope::ClusterScope:
  case SyncScope::HIPWorkgroup:
  case SyncScope::OpenCLWorkGroup:
  case SyncScope::WorkgroupScope:
    return "workgroup";
  case SyncScope::HIPAgent:
  case SyncScope::OpenCLDevice:
  case SyncScope::DeviceScope:
    return "device";
  case SyncScope::SystemScope:
  case SyncScope::HIPSystem:
  case SyncScope::OpenCLAllSVMDevices:
    return "";
  }
  // Unreachable for a valid SyncScope; kept for defensiveness.
  return "";
}
592
593void SPIRVTargetCodeGenInfo::setTargetAtomicMetadata(
594 CodeGenFunction &CGF, llvm::Instruction &AtomicInst,
595 const AtomicExpr *AE) const {
596 if (CGF.CGM.getTriple().getVendor() != llvm::Triple::VendorType::AMD)
597 return;
598
599 auto *RMW = dyn_cast<llvm::AtomicRMWInst>(&AtomicInst);
600 if (!RMW)
601 return;
602
603 AtomicOptions AO = CGF.CGM.getAtomicOpts();
604 llvm::MDNode *Empty = llvm::MDNode::get(CGF.getLLVMContext(), {});
606 RMW->setMetadata("amdgpu.no.fine.grained.memory", Empty);
608 RMW->setMetadata("amdgpu.no.remote.memory", Empty);
610 RMW->getOperation() == llvm::AtomicRMWInst::FAdd &&
611 RMW->getType()->isFloatTy())
612 RMW->setMetadata("amdgpu.ignore.denormal.mode", Empty);
613}
614
615/// Construct a SPIR-V target extension type for the given OpenCL image type.
616static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType,
617 StringRef OpenCLName,
618 unsigned AccessQualifier) {
619 // These parameters compare to the operands of OpTypeImage (see
620 // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage
621 // for more details). The first 6 integer parameters all default to 0, and
622 // will be changed to 1 only for the image type(s) that set the parameter to
623 // one. The 7th integer parameter is the access qualifier, which is tacked on
624 // at the end.
625 SmallVector<unsigned, 7> IntParams = {0, 0, 0, 0, 0, 0};
626
627 // Choose the dimension of the image--this corresponds to the Dim enum in
628 // SPIR-V (first integer parameter of OpTypeImage).
629 if (OpenCLName.starts_with("image2d"))
630 IntParams[0] = 1;
631 else if (OpenCLName.starts_with("image3d"))
632 IntParams[0] = 2;
633 else if (OpenCLName == "image1d_buffer")
634 IntParams[0] = 5; // Buffer
635 else
636 assert(OpenCLName.starts_with("image1d") && "Unknown image type");
637
638 // Set the other integer parameters of OpTypeImage if necessary. Note that the
639 // OpenCL image types don't provide any information for the Sampled or
640 // Image Format parameters.
641 if (OpenCLName.contains("_depth"))
642 IntParams[1] = 1;
643 if (OpenCLName.contains("_array"))
644 IntParams[2] = 1;
645 if (OpenCLName.contains("_msaa"))
646 IntParams[3] = 1;
647
648 // Access qualifier
649 IntParams.push_back(AccessQualifier);
650
651 return llvm::TargetExtType::get(Ctx, BaseType, {llvm::Type::getVoidTy(Ctx)},
652 IntParams);
653}
654
// Map an OpenCL builtin type to its SPIR-V target extension type, or return
// nullptr for types with no special SPIR-V representation.
llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(CodeGenModule &CGM,
                                                       const Type *Ty) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  // Pipes encode their access mode (0 = read-only) as the integer parameter.
  if (auto *PipeTy = dyn_cast<PipeType>(Ty))
    return llvm::TargetExtType::get(Ctx, "spirv.Pipe", {},
                                    {!PipeTy->isReadOnly()});
  if (auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
    enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
    switch (BuiltinTy->getKind()) {
// One case per OpenCL image type, expanded from the .def file.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
#include "clang/Basic/OpenCLImageTypes.def"
    case BuiltinType::OCLSampler:
      return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
    case BuiltinType::OCLEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.Event");
    case BuiltinType::OCLClkEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.DeviceEvent");
    case BuiltinType::OCLQueue:
      return llvm::TargetExtType::get(Ctx, "spirv.Queue");
    case BuiltinType::OCLReserveID:
      return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
// One case per Intel subgroup AVC type, expanded from the .def file.
#define INTEL_SUBGROUP_AVC_TYPE(Name, Id) \
  case BuiltinType::OCLIntelSubgroupAVC##Id: \
    return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
#include "clang/Basic/OpenCLExtensionTypes.def"
    default:
      return nullptr;
    }
  }

  return nullptr;
}
689
690// Gets a spirv.IntegralConstant or spirv.Literal. If IntegralType is present,
691// returns an IntegralConstant, otherwise returns a Literal.
692static llvm::Type *getInlineSpirvConstant(CodeGenModule &CGM,
693 llvm::Type *IntegralType,
694 llvm::APInt Value) {
695 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
696
697 // Convert the APInt value to an array of uint32_t words
699
700 while (Value.ugt(0)) {
701 uint32_t Word = Value.trunc(32).getZExtValue();
702 Value.lshrInPlace(32);
703
704 Words.push_back(Word);
705 }
706 if (Words.size() == 0)
707 Words.push_back(0);
708
709 if (IntegralType)
710 return llvm::TargetExtType::get(Ctx, "spirv.IntegralConstant",
711 {IntegralType}, Words);
712 return llvm::TargetExtType::get(Ctx, "spirv.Literal", {}, Words);
713}
714
715static llvm::Type *getInlineSpirvType(CodeGenModule &CGM,
716 const HLSLInlineSpirvType *SpirvType) {
717 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
718
720
721 for (auto &Operand : SpirvType->getOperands()) {
722 using SpirvOperandKind = SpirvOperand::SpirvOperandKind;
723
724 llvm::Type *Result = nullptr;
725 switch (Operand.getKind()) {
726 case SpirvOperandKind::ConstantId: {
727 llvm::Type *IntegralType =
728 CGM.getTypes().ConvertType(Operand.getResultType());
729
730 Result = getInlineSpirvConstant(CGM, IntegralType, Operand.getValue());
731 break;
732 }
733 case SpirvOperandKind::Literal: {
734 Result = getInlineSpirvConstant(CGM, nullptr, Operand.getValue());
735 break;
736 }
737 case SpirvOperandKind::TypeId: {
738 QualType TypeOperand = Operand.getResultType();
739 if (const auto *RD = TypeOperand->getAsRecordDecl()) {
740 assert(RD->isCompleteDefinition() &&
741 "Type completion should have been required in Sema");
742
743 const FieldDecl *HandleField = RD->findFirstNamedDataMember();
744 if (HandleField) {
745 QualType ResourceType = HandleField->getType();
746 if (ResourceType->getAs<HLSLAttributedResourceType>()) {
747 TypeOperand = ResourceType;
748 }
749 }
750 }
751 Result = CGM.getTypes().ConvertType(TypeOperand);
752 break;
753 }
754 default:
755 llvm_unreachable("HLSLInlineSpirvType had invalid operand!");
756 break;
757 }
758
759 assert(Result);
760 Operands.push_back(Result);
761 }
762
763 return llvm::TargetExtType::get(Ctx, "spirv.Type", Operands,
764 {SpirvType->getOpcode(), SpirvType->getSize(),
765 SpirvType->getAlignment()});
766}
767
// Map an HLSL type (inline SPIR-V or attributed resource) to its SPIR-V
// target extension type, or return nullptr when no mapping applies.
llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
    CodeGenModule &CGM, const Type *Ty,
    const CGHLSLOffsetInfo &OffsetInfo) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  if (auto *SpirvType = dyn_cast<HLSLInlineSpirvType>(Ty))
    return getInlineSpirvType(CGM, SpirvType);

  auto *ResType = dyn_cast<HLSLAttributedResourceType>(Ty);
  if (!ResType)
    return nullptr;

  const HLSLAttributedResourceType::Attributes &ResAttrs = ResType->getAttrs();
  switch (ResAttrs.ResourceClass) {
  case llvm::dxil::ResourceClass::UAV:
  case llvm::dxil::ResourceClass::SRV: {
    // TypedBuffer and RawBuffer both need element type
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull())
      return nullptr;

    assert(!ResAttrs.IsROV &&
           "Rasterizer order views not implemented for SPIR-V yet");

    if (!ResAttrs.RawBuffer) {
      // convert element type
      return getSPIRVImageTypeFromHLSLResource(ResAttrs, ContainedTy, CGM);
    }

    // A counter buffer is a writable i32 StorageBuffer.
    if (ResAttrs.IsCounter) {
      llvm::Type *ElemType = llvm::Type::getInt32Ty(Ctx);
      uint32_t StorageClass = /* StorageBuffer storage class */ 12;
      return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {ElemType},
                                      {StorageClass, true});
    }
    // Raw buffers become runtime arrays in the StorageBuffer storage class;
    // UAVs are writable, SRVs are not.
    llvm::Type *ElemType = CGM.getTypes().ConvertTypeForMem(ContainedTy);
    llvm::ArrayType *RuntimeArrayType = llvm::ArrayType::get(ElemType, 0);
    uint32_t StorageClass = /* StorageBuffer storage class */ 12;
    bool IsWritable = ResAttrs.ResourceClass == llvm::dxil::ResourceClass::UAV;
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer",
                                    {RuntimeArrayType},
                                    {StorageClass, IsWritable});
  }
  case llvm::dxil::ResourceClass::CBuffer: {
    // Constant buffers require a struct contained type laid out per HLSL
    // buffer layout rules, in the Uniform storage class.
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull() || !ContainedTy->isStructureType())
      return nullptr;

    llvm::StructType *BufferLayoutTy =
        HLSLBufferLayoutBuilder(CGM).layOutStruct(
            ContainedTy->getAsCanonical<RecordType>(), OffsetInfo);
    uint32_t StorageClass = /* Uniform storage class */ 2;
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {BufferLayoutTy},
                                    {StorageClass, false});
    break;
  }
  case llvm::dxil::ResourceClass::Sampler:
    return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
  }
  return nullptr;
}
829
830static unsigned
832 const HLSLAttributedResourceType::Attributes &attributes,
833 llvm::Type *SampledType, QualType Ty, unsigned NumChannels) {
834 // For images with `Sampled` operand equal to 2, there are restrictions on
835 // using the Unknown image format. To avoid these restrictions in common
836 // cases, we guess an image format for them based on the sampled type and the
837 // number of channels. This is intended to match the behaviour of DXC.
838 if (LangOpts.HLSLSpvUseUnknownImageFormat ||
839 attributes.ResourceClass != llvm::dxil::ResourceClass::UAV) {
840 return 0; // Unknown
841 }
842
843 if (SampledType->isIntegerTy(32)) {
844 if (Ty->isSignedIntegerType()) {
845 if (NumChannels == 1)
846 return 24; // R32i
847 if (NumChannels == 2)
848 return 25; // Rg32i
849 if (NumChannels == 4)
850 return 21; // Rgba32i
851 } else {
852 if (NumChannels == 1)
853 return 33; // R32ui
854 if (NumChannels == 2)
855 return 35; // Rg32ui
856 if (NumChannels == 4)
857 return 30; // Rgba32ui
858 }
859 } else if (SampledType->isIntegerTy(64)) {
860 if (NumChannels == 1) {
861 if (Ty->isSignedIntegerType()) {
862 return 41; // R64i
863 }
864 return 40; // R64ui
865 }
866 } else if (SampledType->isFloatTy()) {
867 if (NumChannels == 1)
868 return 3; // R32f
869 if (NumChannels == 2)
870 return 6; // Rg32f
871 if (NumChannels == 4)
872 return 1; // Rgba32f
873 }
874
875 return 0; // Unknown
876}
877
878llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource(
879 const HLSLAttributedResourceType::Attributes &attributes, QualType Ty,
880 CodeGenModule &CGM) const {
881 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
882
883 unsigned NumChannels = 1;
885 if (const VectorType *V = dyn_cast<VectorType>(Ty)) {
886 NumChannels = V->getNumElements();
887 Ty = V->getElementType();
888 }
889 assert(!Ty->isVectorType() && "We still have a vector type.");
890
891 llvm::Type *SampledType = CGM.getTypes().ConvertTypeForMem(Ty);
892
893 assert((SampledType->isIntegerTy() || SampledType->isFloatingPointTy()) &&
894 "The element type for a SPIR-V resource must be a scalar integer or "
895 "floating point type.");
896
897 // These parameters correspond to the operands to the OpTypeImage SPIR-V
898 // instruction. See
899 // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage.
900 SmallVector<unsigned, 6> IntParams(6, 0);
901
902 const char *Name =
903 Ty->isSignedIntegerType() ? "spirv.SignedImage" : "spirv.Image";
904
905 // Dim
906 switch (attributes.ResourceDimension) {
907 case llvm::dxil::ResourceDimension::Dim1D:
908 IntParams[0] = 0;
909 break;
910 case llvm::dxil::ResourceDimension::Dim2D:
911 IntParams[0] = 1;
912 break;
913 case llvm::dxil::ResourceDimension::Dim3D:
914 IntParams[0] = 2;
915 break;
916 case llvm::dxil::ResourceDimension::Cube:
917 IntParams[0] = 3;
918 break;
919 case llvm::dxil::ResourceDimension::Unknown:
920 IntParams[0] = 5;
921 break;
922 }
923
924 // Depth
925 // HLSL does not indicate if it is a depth texture or not, so we use unknown.
926 IntParams[1] = 2;
927
928 // Arrayed
929 IntParams[2] = 0;
930
931 // MS
932 IntParams[3] = 0;
933
934 // Sampled
935 IntParams[4] =
936 attributes.ResourceClass == llvm::dxil::ResourceClass::UAV ? 2 : 1;
937
938 // Image format.
939 IntParams[5] = getImageFormat(CGM.getLangOpts(), attributes, SampledType, Ty,
940 NumChannels);
941
942 llvm::TargetExtType *ImageType =
943 llvm::TargetExtType::get(Ctx, Name, {SampledType}, IntParams);
944 return ImageType;
945}
946
947std::unique_ptr<TargetCodeGenInfo>
949 return std::make_unique<CommonSPIRTargetCodeGenInfo>(CGM.getTypes());
950}
951
952std::unique_ptr<TargetCodeGenInfo>
954 return std::make_unique<SPIRVTargetCodeGenInfo>(CGM.getTypes());
955}
#define V(N, I)
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
Definition CGCall.cpp:362
static llvm::Type * getInlineSpirvType(CodeGenModule &CGM, const HLSLInlineSpirvType *SpirvType)
Definition SPIR.cpp:715
static llvm::Type * getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType, StringRef OpenCLName, unsigned AccessQualifier)
Construct a SPIR-V target extension type for the given OpenCL image type.
Definition SPIR.cpp:616
static unsigned getImageFormat(const LangOptions &LangOpts, const HLSLAttributedResourceType::Attributes &attributes, llvm::Type *SampledType, QualType Ty, unsigned NumChannels)
Definition SPIR.cpp:831
static llvm::Type * getInlineSpirvConstant(CodeGenModule &CGM, llvm::Type *IntegralType, llvm::APInt Value)
Definition SPIR.cpp:692
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Result
Implement __builtin_bit_cast and related operations.
Defines the clang::LangOptions interface.
static StringRef getTriple(const Command &Job)
unsigned getTargetAddressSpace(LangAS AS) const
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
static ABIArgInfo getIgnore()
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
static ABIArgInfo getIndirectAliased(CharUnits Alignment, unsigned AddrSpace, bool Realign=false, llvm::Type *Padding=nullptr)
Pass this in memory using the IR byref attribute.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
Definition CGCXXABI.h:158
CGFunctionInfo - Class to encapsulate the information about a function definition.
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
CanQualType getReturnType() const
MutableArrayRef< ArgInfo > arguments()
llvm::LLVMContext & getLLVMContext()
This class organizes the cross-function state that is used while generating LLVM code.
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::Triple & getTriple() const
AtomicOptions getAtomicOpts()
Get the current Atomic options.
ASTContext & getContext() const
llvm::LLVMContext & getLLVMContext()
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
DefaultABIInfo - The default implementation for ABI specific details.
Definition ABIInfoImpl.h:21
ABIArgInfo classifyArgumentType(QualType RetTy) const
ABIArgInfo classifyReturnType(QualType RetTy) const
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
Definition TargetInfo.h:50
T * getAttr() const
Definition DeclBase.h:581
bool hasAttr() const
Definition DeclBase.h:585
Represents a member of a struct/union/class.
Definition Decl.h:3175
ExtInfo withCallingConv(CallingConv cc) const
Definition TypeBase.h:4781
ExtInfo getExtInfo() const
Definition TypeBase.h:4914
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8562
bool canPassInRegisters() const
Determine whether this class can be passed in registers.
Definition Decl.h:4479
bool hasFlexibleArrayMember() const
Definition Decl.h:4375
const FieldDecl * findFirstNamedDataMember() const
Finds the first data member which has a name.
Definition Decl.cpp:5370
bool isCompleteDefinition() const
Return true if this decl has its body fully specified.
Definition Decl.h:3833
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isStructureType() const
Definition Type.cpp:715
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2266
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
CanQualType getCanonicalTypeUnqualified() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:789
bool isVectorType() const
Definition TypeBase.h:8812
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
Definition TypeBase.h:2976
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9266
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition Type.cpp:690
bool isNullPtrType() const
Definition TypeBase.h:9076
QualType getType() const
Definition Decl.h:723
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Definition SPIR.cpp:434
RValue emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, AggValueSlot Slot, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
bool isAggregateTypeForABI(QualType T)
const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "singleelement struct", i.e.
std::unique_ptr< TargetCodeGenInfo > createSPIRVTargetCodeGenInfo(CodeGenModule &CGM)
Definition SPIR.cpp:953
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM)
Definition SPIR.cpp:948
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
The JSON file list parser is used to communicate input to InstallAPI.
StorageClass
Storage classes.
Definition Specifiers.h:249
@ Type
The name was classified as a type.
Definition Sema.h:564
LangAS
Defines the address space values used by the address space qualifier of QualType.
for(const auto &A :T->param_types())
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
@ CC_DeviceKernel
Definition Specifiers.h:293
@ CC_SpirFunction
Definition Specifiers.h:292
LangAS getLangASFromTargetAS(unsigned TargetAS)
unsigned long uint64_t
unsigned int uint32_t
bool getOption(AtomicOptionKind Kind) const