10#include "TargetInfo.h"
static const unsigned MaxNumRegsForArgsRet = 16;

  unsigned numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;
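
  // Coerce a kernel pointer argument from address space FromAS to ToAS; used
  // under HIP to turn generic (default) pointers into device-global pointers.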
  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const {
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
    if (PtrTy && PtrTy->getAddressSpace() == FromAS)
      return llvm::PointerType::get(Ty->getContext(), ToAS);
    return Ty;
  }

  ABIArgInfo classifyArgumentType(QualType Ty, bool Variadic,
                                  unsigned &NumRegsLeft) const;
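
// Homogeneous aggregates are measured in 32-bit registers and must fit within
// MaxNumRegsForArgsRet of them.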
bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}

bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                      uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  // Homogeneous aggregates may occupy at most MaxNumRegsForArgsRet registers.
  return Members * NumRegs <= MaxNumRegsForArgsRet;
}
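
/// Estimate the number of 32-bit registers a value of type Ty occupies when
/// passed or returned directly.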
unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
  unsigned NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    QualType EltTy = VT->getElementType();
    unsigned EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors are packed two elements per 32-bit register.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    unsigned EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // Sum the registers used by each field.
    for (const FieldDecl *Field : RT->getDecl()->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }
    return NumRegs;
  }

  // Scalars round up to whole 32-bit registers.
  return (getContext().getTypeSize(Ty) + 31) / 32;
}
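
// computeInfo: kernel (AMDGPU_KERNEL) arguments use the kernel ABI; all other
// arguments share a budget of MaxNumRegsForArgsRet 32-bit registers.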
void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned ArgumentIndex = 0;
  const unsigned numFixedArguments = FI.getNumRequiredArgs();

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      bool FixedArgument = ArgumentIndex++ < numFixedArguments;
      Arg.info = classifyArgumentType(Arg.type, !FixedArgument, NumRegsLeft);
    }
  }
}
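
// va_arg on AMDGPU: values are loaded straight from the va_list slot, never
// by reference, and no alignment above the slot alignment is honoured.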
RValue AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  const bool IsIndirect = false;
  const bool AllowHigherAlign = false;
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4), AllowHigherAlign, Slot);
}
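
// Return-value classification: small aggregates are packed into registers;
// anything that does not fit falls back to the default (indirect) return.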
ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
  // ... (small aggregate returns are packed into 32-bit registers)
  llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
  // ...
  // Aggregates that still fit the register budget are returned directly.
  if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
    return ABIArgInfo::getDirect();
  // ...
}
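
// Kernel arguments live in a dedicated buffer rather than being passed byval;
// under HIP, generic pointer arguments are coerced to the device address
// space.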
ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
  // ... (transparent unions and single-element structs are unwrapped first)

  llvm::Type *OrigLTy = CGT.ConvertType(Ty);
  llvm::Type *LTy = OrigLTy;
  if (getContext().getLangOpts().HIP) {
    LTy = coerceKernelArgumentType(
        OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
        /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
  }

  // Non-OpenCL kernel aggregates that were not coerced are passed byref in
  // the constant address space.
  if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
      isAggregateTypeForABI(Ty)) {
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_constant),
        false /*Realign*/, nullptr /*Padding*/);
  }

  return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
}
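
// Non-kernel arguments: aggregates are packed into 32-bit registers while the
// budget lasts and are otherwise passed by reference in private memory.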
210 unsigned &NumRegsLeft)
const {
211 assert(NumRegsLeft <= MaxNumRegsForArgsRet &&
"register estimate underflow");
248 unsigned NumRegs = (
Size + 31) / 32;
249 NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
258 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
262 if (NumRegsLeft > 0) {
263 unsigned NumRegs = numRegsForType(Ty);
264 if (NumRegsLeft >= NumRegs) {
265 NumRegsLeft -= NumRegs;
273 getContext().getTypeAlignInChars(Ty),
274 getContext().getTargetAddressSpace(LangAS::opencl_private));
280 unsigned NumRegs = numRegsForType(Ty);
281 NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
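
// Target hooks for AMDGPU code generation: function attributes, address
// spaces, sync scopes, atomics metadata, and enqueued-block kernels.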
  void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
                                 CodeGenModule &M) const;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T,
                                 QualType QT) const override;

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }

  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  void setTargetAtomicMetadata(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst &RMW) const override;
  llvm::Value *createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                         llvm::Function *BlockInvokeFunc,
                                         llvm::Type *BlockTy) const override;
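
// Kernels and device-visible CUDA/OpenCL globals must not keep hidden
// visibility; setTargetAttributes upgrades them to protected instead.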
static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return !D->hasAttr<OMPDeclareTargetDeclAttr>() &&
         (D->hasAttr<OpenCLKernelAttr>() ||
          (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
          (isa<VarDecl>(D) &&
           (D->hasAttr<CUDAConstantAttr>() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())));
}
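
// Translate source-level AMDGPU attributes (flat work-group size, waves per
// EU, SGPR/VGPR counts, max work-group counts) into "amdgpu-*" IR attributes.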
void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
    const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
  const auto *ReqdWGS =
      M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
  const bool IsOpenCLKernel =
      M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>();
  const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    M.handleAMDGPUFlatWorkGroupSizeAttr(F, FlatWGS, ReqdWGS);
  } else if (IsOpenCLKernel || IsHIPKernel) {
    // Kernels without an explicit attribute get the default work-group bound.
    const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
    const unsigned DefaultMaxWorkGroupSize =
        IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
                       : M.getLangOpts().GPUMaxThreadsPerBlock;
    std::string AttrVal =
        std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  }

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>())
    M.handleAMDGPUWavesPerEUAttr(F, Attr);

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();
    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();
    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUMaxNumWorkGroupsAttr>()) {
    // The Y and Z dimensions default to 1 when not given.
    uint32_t X = Attr->getMaxNumWorkGroupsX()
                     ->EvaluateKnownConstInt(M.getContext()).getExtValue();
    uint32_t Y = Attr->getMaxNumWorkGroupsY()
                     ? Attr->getMaxNumWorkGroupsY()
                           ->EvaluateKnownConstInt(M.getContext()).getExtValue()
                     : 1;
    uint32_t Z = Attr->getMaxNumWorkGroupsZ()
                     ? Attr->getMaxNumWorkGroupsZ()
                           ->EvaluateKnownConstInt(M.getContext()).getExtValue()
                     : 1;

    llvm::SmallString<32> AttrVal;
    llvm::raw_svector_ostream OS(AttrVal);
    OS << X << ',' << Y << ',' << Z;

    F->addFnAttr("amdgpu-max-num-workgroups", AttrVal.str());
  }
}
void AMDGPUTargetCodeGenInfo::emitTargetGlobals(
    CodeGen::CodeGenModule &CGM) const {
  StringRef Name = "__oclc_ABI_version";
  llvm::GlobalVariable *OriginalGV = CGM.getModule().getNamedGlobal(Name);
  if (OriginalGV && !llvm::GlobalVariable::isExternalLinkage(OriginalGV->getLinkage()))
    return;

  if (CGM.getTarget().getTargetOpts().CodeObjectVersion ==
      llvm::CodeObjectVersionKind::COV_None)
    return;

  auto *Type = llvm::IntegerType::getIntNTy(CGM.getModule().getContext(), 32);
  llvm::Constant *COV = llvm::ConstantInt::get(
      Type, CGM.getTarget().getTargetOpts().CodeObjectVersion);

  // Emit the value as a hidden weak_odr constant global.
  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), Type, true, llvm::GlobalValue::WeakODRLinkage, COV, Name,
      nullptr, llvm::GlobalValue::ThreadLocalMode::NotThreadLocal,
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant));
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Local);
  GV->setVisibility(llvm::GlobalValue::VisibilityTypes::HiddenVisibility);

  // Replace any existing external declaration with the new definition.
  if (OriginalGV) {
    OriginalGV->replaceAllUsesWith(GV);
    GV->takeName(OriginalGV);
    OriginalGV->eraseFromParent();
  }
}
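
// Apply AMDGPU-specific visibility and IR function attributes to a global.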
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  if (!F)
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD)
    setFunctionDeclAttributes(FD, F, M);

  if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
    F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");

  if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
    F->addFnAttr("amdgpu-ieee", "false");
}

unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}

// A null pointer is only the literal value 0 in address spaces where that is
// the target's null representation; otherwise cast the generic null pointer.
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  auto &Ctx = CGM.getContext();
  if (Ctx.getTargetNullPointerValue(QT) == 0)
    return llvm::ConstantPointerNull::get(PT);

  auto NPT = llvm::PointerType::get(
      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
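
// Choose the AST address space for a global: explicit qualifiers win,
// constant-initialized read-only variables may be promoted to the constant
// address space, and everything else uses the default global address space.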
LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  // Only promote to the constant address space when the variable has constant
  // storage and constant initialization.
  if (D->getType().isConstantStorage(CGM.getContext(), false, false) &&
      D->hasConstantInitialization()) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return *ConstAS;
  }
  return DefaultGlobalAS;
}
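
// Map Clang atomic sync scopes to the AMDGPU sync-scope names understood by
// the backend ("singlethread", wavefront, workgroup, agent, or system).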
llvm::SyncScope::ID
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                            SyncScope Scope,
                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {
  std::string Name;
  switch (Scope) {
  case SyncScope::HIPSingleThread:
  case SyncScope::SingleScope:
    Name = "singlethread";
    break;
  case SyncScope::HIPWavefront:
  case SyncScope::OpenCLSubGroup:
  case SyncScope::WavefrontScope:
    Name = "wavefront";
    break;
  case SyncScope::HIPWorkgroup:
  case SyncScope::OpenCLWorkGroup:
  case SyncScope::WorkgroupScope:
    Name = "workgroup";
    break;
  case SyncScope::HIPAgent:
  case SyncScope::OpenCLDevice:
  case SyncScope::DeviceScope:
    Name = "agent";
    break;
  case SyncScope::SystemScope:
  case SyncScope::HIPSystem:
  case SyncScope::OpenCLAllSVMDevices:
    Name = "";
    break;
  }

  // Non-seq_cst orderings use the "one-as" (per-address-space) variants.
  if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
    if (!Name.empty())
      Name = Twine(Twine(Name) + Twine("-")).str();
    Name = Twine(Twine(Name) + Twine("one-as")).str();
  }

  return Ctx.getOrInsertSyncScopeID(Name);
}
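
// For unsafe FP atomics, attach AMDGPU metadata that allows the backend to
// lower the operation to hardware atomics.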
void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(
    CodeGenFunction &CGF, llvm::AtomicRMWInst &RMW) const {
  if (!CGF.getTarget().allowAMDGPUUnsafeFPAtomics())
    return;

  llvm::AtomicRMWInst::BinOp RMWOp = RMW.getOperation();
  if (llvm::AtomicRMWInst::isFPOperation(RMWOp)) {
    llvm::MDNode *Empty = llvm::MDNode::get(CGF.getLLVMContext(), {});
    RMW.setMetadata("amdgpu.no.fine.grained.memory", Empty);

    if (RMWOp == llvm::AtomicRMWInst::FAdd && RMW.getType()->isFloatTy())
      RMW.setMetadata("amdgpu.ignore.denormal.mode", Empty);
  }
}

bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}

bool AMDGPUTargetCodeGenInfo::shouldEmitDWARFBitFieldSeparators() const {
  return true;
}

void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
}
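
// Wrap an OpenCL block invoke function in an AMDGPU kernel that can be
// enqueued, and attach the kernel argument metadata the runtime expects.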
llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  // The first kernel argument is the block literal itself.
  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));

  // The remaining invoke parameters become local (address space 3) pointers.
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }

  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);

  llvm::AttrBuilder KernelAttrs(C);
  CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
  KernelAttrs.addAttribute("enqueued-block");
  F->addFnAttrs(KernelAttrs);

  // Build the kernel body: spill the block argument to an alloca and forward
  // it, plus all remaining arguments, to the original invoke function.
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (llvm::Argument &A : llvm::drop_begin(F->args()))
    Args.push_back(&A);
  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}
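
// Encode the flat work-group size bounds (from the AMDGPU attribute or
// reqd_work_group_size) as "amdgpu-flat-work-group-size", optionally reporting
// them back through MinThreadsVal/MaxThreadsVal.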
void CodeGenModule::handleAMDGPUFlatWorkGroupSizeAttr(
    llvm::Function *F, const AMDGPUFlatWorkGroupSizeAttr *FlatWGS,
    const ReqdWorkGroupSizeAttr *ReqdWGS, int32_t *MinThreadsVal,
    int32_t *MaxThreadsVal) {
  unsigned Min = 0;
  unsigned Max = 0;
  if (FlatWGS) {
    Min = FlatWGS->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
    Max = FlatWGS->getMax()->EvaluateKnownConstInt(getContext()).getExtValue();
  }
  // A reqd_work_group_size attribute pins both bounds to the exact size.
  if (ReqdWGS && Min == 0 && Max == 0)
    Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();

  if (Min != 0) {
    assert(Min <= Max && "Min must be less than or equal Max");

    if (MinThreadsVal)
      *MinThreadsVal = Min;
    if (MaxThreadsVal)
      *MaxThreadsVal = Max;
    std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
    if (F)
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}
void CodeGenModule::handleAMDGPUWavesPerEUAttr(
    llvm::Function *F, const AMDGPUWavesPerEUAttr *Attr) {
  unsigned Min =
      Attr->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
  unsigned Max =
      Attr->getMax()
          ? Attr->getMax()->EvaluateKnownConstInt(getContext()).getExtValue()
          : 0;

  if (Min != 0) {
    assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

    std::string AttrVal = llvm::utostr(Min);
    if (Max != 0)
      AttrVal = AttrVal + "," + llvm::utostr(Max);
    F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<AMDGPUTargetCodeGenInfo>(CGM.getTypes());
}