#include "TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
  static const unsigned MaxNumRegsForArgsRet = 16;

  unsigned numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;

  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const {
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
    if (PtrTy && PtrTy->getAddressSpace() == FromAS)
      return llvm::PointerType::get(Ty->getContext(), ToAS);
    return Ty;
  }
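  // Only a pointer that is already in FromAS is retargeted to ToAS; any other
  // type is returned unchanged. classifyKernelArgumentType() below relies on
  // this under HIP to turn generic (LangAS::Default) pointer arguments into
  // device/global pointers.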
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, bool Variadic,
                                  unsigned &NumRegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
  llvm::FixedVectorType *
  getOptimalVectorMemoryType(llvm::FixedVectorType *T,
                             const LangOptions &Opt) const override {
    if (T->getNumElements() == 3 && getDataLayout().getTypeSizeInBits(T) == 96)
      return T;
    return DefaultABIInfo::getOptimalVectorMemoryType(T, Opt);
  }
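  // A 3-element vector whose in-memory size is 96 bits (e.g. <3 x i32>) is
  // kept as-is rather than widened, since the target has 96-bit memory
  // instructions; other vectors defer to the default handling.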
bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}

bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                      uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  return Members * NumRegs <= MaxNumRegsForArgsRet;
}
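// Each homogeneous-aggregate member is rounded up to whole 32-bit registers;
// the aggregate only qualifies if all members together fit within the
// MaxNumRegsForArgsRet (16 register) argument/return budget.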
unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    QualType EltTy = VT->getElementType();
    unsigned EltSize = getContext().getTypeSize(EltTy);

    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    unsigned EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

    assert(!RD->hasFlexibleArrayMember());

    for (const FieldDecl *Field : RD->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }

  return (getContext().getTypeSize(Ty) + 31) / 32;
}
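// Example of this counting: a <4 x float> costs 4 registers, a <4 x half>
// packs into 2, and a struct of two ints costs 2; scalars round up to whole
// 32-bit registers.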
void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();
  unsigned ArgumentIndex = 0;
  const unsigned numFixedArguments = FI.getNumRequiredArgs();
  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      bool FixedArgument = ArgumentIndex++ < numFixedArguments;
      Arg.info = classifyArgumentType(Arg.type, !FixedArgument, NumRegsLeft);
    }
  }
}
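// amdgpu_kernel arguments take the dedicated kernel classification; all other
// arguments share one running NumRegsLeft budget, and parameters beyond the
// required (fixed) ones are classified as variadic.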
RValue AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  const bool IsIndirect = false;
  const bool AllowHigherAlign = false;
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4), AllowHigherAlign, Slot);
}
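// va_arg is lowered through the common void*-based path: values are never
// passed indirectly and are not realigned beyond the slot alignment.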
ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {

      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());

    if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
      return ABIArgInfo::getDirect();
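// Aggregate returns that fit the 16-register budget are returned directly
// (small ones coerced to i32-based types); anything larger falls back to the
// default ABI's return handling.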
ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {

  if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(SeltTy, 0);

  llvm::Type *OrigLTy = CGT.ConvertType(Ty);
  llvm::Type *LTy = OrigLTy;
  if (getContext().getLangOpts().HIP) {
    LTy = coerceKernelArgumentType(
        OrigLTy, getContext().getTargetAddressSpace(LangAS::Default),
        getContext().getTargetAddressSpace(LangAS::cuda_device));
  }

    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_constant),
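// Kernel arguments are never passed byval: single-element structs are
// unwrapped, HIP pointer arguments are retargeted from the generic to the
// device/global address space, and aggregates are passed byref in the
// constant address space.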
ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");

      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),

    unsigned NumRegs = (Size + 31) / 32;
    NumRegsLeft -= std::min(NumRegsLeft, NumRegs);

      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());

    if (NumRegsLeft > 0) {
      unsigned NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;

    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_private));

  unsigned NumRegs = numRegsForType(Ty);
  NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
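// Non-kernel arguments draw down the shared NumRegsLeft budget as they are
// classified; aggregates that can no longer be passed directly are passed
// indirectly in the private address space instead.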
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}

  bool supportsLibCall() const override { return false; }
  void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
                                 CodeGenModule &CGM) const;

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getDeviceKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T,
                                 QualType QT) const override;

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }

  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  void setTargetAtomicMetadata(CodeGenFunction &CGF,
                               llvm::Instruction &AtomicInst,
                               const AtomicExpr *Expr = nullptr) const override;
  llvm::Value *createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                         llvm::Function *BlockInvokeFunc,
                                         llvm::Type *BlockTy) const override;
  bool shouldEmitStaticExternCAliases() const override;
  bool shouldEmitDWARFBitFieldSeparators() const override;
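// These hooks cover the AMDGPU-specific target behavior: kernel calling
// convention and attributes, address-space-aware null pointers and globals,
// sync-scope lowering, atomic metadata, and device-enqueue block kernels.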
static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return !D->hasAttr<OMPDeclareTargetDeclAttr>() &&
         (D->hasAttr<DeviceKernelAttr>() ||

          (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
           cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
           cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())));
}
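// Protected visibility is only forced onto kernels and device globals that
// would otherwise be hidden.

// setFunctionDeclAttributes() below translates the source-level AMDGPU
// attributes - flat work-group size (with a conservative "1,256" default for
// attribute-less OpenCL kernels), waves-per-EU, SGPR/VGPR counts,
// max-num-workgroups, and cluster dimensions - into "amdgpu-*" IR function
// attributes.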
void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
    const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
  const auto *ReqdWGS =
      M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
  const bool IsOpenCLKernel =
      M.getLangOpts().OpenCL && FD->hasAttr<DeviceKernelAttr>();

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    M.handleAMDGPUFlatWorkGroupSizeAttr(F, FlatWGS, ReqdWGS);
  } else if (IsOpenCLKernel || IsHIPKernel) {
    const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
    const unsigned DefaultMaxWorkGroupSize =
        IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
                       : M.getLangOpts().GPUMaxThreadsPerBlock;
    std::string AttrVal =
        std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  }
  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>())
    M.handleAMDGPUWavesPerEUAttr(F, Attr);

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUMaxNumWorkGroupsAttr>()) {
    uint32_t X = Attr->getMaxNumWorkGroupsX()
                     ->EvaluateKnownConstInt(M.getContext())
                     .getExtValue();
    uint32_t Y = Attr->getMaxNumWorkGroupsY()
                     ? Attr->getMaxNumWorkGroupsY()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;
    uint32_t Z = Attr->getMaxNumWorkGroupsZ()
                     ? Attr->getMaxNumWorkGroupsZ()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;

    llvm::SmallString<32> AttrVal;
    llvm::raw_svector_ostream OS(AttrVal);
    OS << X << ',' << Y << ',' << Z;

    F->addFnAttr("amdgpu-max-num-workgroups", AttrVal.str());
  }
  if (auto *Attr = FD->getAttr<CUDAClusterDimsAttr>()) {
    auto GetExprVal = [&](const auto &E) {
      return E ? E->EvaluateKnownConstInt(M.getContext()).getExtValue() : 1;
    };
    unsigned X = GetExprVal(Attr->getX());
    unsigned Y = GetExprVal(Attr->getY());
    unsigned Z = GetExprVal(Attr->getZ());
    llvm::SmallString<32> AttrVal;
    llvm::raw_svector_ostream OS(AttrVal);
    OS << X << ',' << Y << ',' << Z;
    F->addFnAttr("amdgpu-cluster-dims", AttrVal.str());
  }

  if ((IsOpenCLKernel &&

      FD->hasAttr<CUDANoClusterAttr>())
    F->addFnAttr("amdgpu-cluster-dims", "0,0,0");
}
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  if (!F)
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD)
    setFunctionDeclAttributes(FD, F, M);

  if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
    F->addFnAttr("amdgpu-ieee", "false");
}
unsigned AMDGPUTargetCodeGenInfo::getDeviceKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}
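// Device kernels use the amdgpu_kernel calling convention; setTargetAttributes()
// above also forwards the per-declaration attributes and adds
// "amdgpu-ieee"="false" when IEEE NaN-compliant instructions are not required.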
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {

    return llvm::ConstantPointerNull::get(PT);

  auto NPT = llvm::PointerType::get(
      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
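// Address spaces whose null representation is the zero bit pattern can use
// ConstantPointerNull directly; otherwise a generic null pointer is
// addrspacecast to the requested type, which the backend folds to the
// target's (non-zero) null value.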
LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {

         "Address space agnostic languages only");

    return DefaultGlobalAS;

  if (AddrSpace != LangAS::Default)

  return DefaultGlobalAS;
}
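// Only address-space-agnostic languages reach this hook (hence the assert);
// a declaration carrying an explicit address space keeps it, and everything
// else falls back to the default global address space.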
llvm::SyncScope::ID
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                            SyncScope Scope,
                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {
  std::string Name;
  switch (Scope) {
  case SyncScope::HIPSingleThread:
  case SyncScope::SingleScope:
    Name = "singlethread";
    break;
  case SyncScope::HIPWavefront:
  case SyncScope::OpenCLSubGroup:
  case SyncScope::WavefrontScope:
    Name = "wavefront";
    break;
  case SyncScope::HIPWorkgroup:
  case SyncScope::OpenCLWorkGroup:
  case SyncScope::WorkgroupScope:
    Name = "workgroup";
    break;
  case SyncScope::HIPAgent:
  case SyncScope::OpenCLDevice:
  case SyncScope::DeviceScope:
    Name = "agent";
    break;
  case SyncScope::SystemScope:
  case SyncScope::HIPSystem:
  case SyncScope::OpenCLAllSVMDevices:
    break;
  }

  if (Scope >= SyncScope::OpenCLWorkGroup &&
      Scope <= SyncScope::OpenCLSubGroup &&
      Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
    if (!Name.empty())
      Name = Twine(Twine(Name) + Twine("-")).str();
    Name = Twine(Twine(Name) + Twine("one-as")).str();
  }

  return Ctx.getOrInsertSyncScopeID(Name);
}
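// For non-seq_cst OpenCL scopes the "one-as" variant of the scope name is
// used, reflecting that such atomics only order a single address space;
// seq_cst keeps the plain scope name.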
void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(
    CodeGenFunction &CGF, llvm::Instruction &AtomicInst,
    const AtomicExpr *AE) const {
  auto *RMW = dyn_cast<llvm::AtomicRMWInst>(&AtomicInst);
  auto *CmpX = dyn_cast<llvm::AtomicCmpXchgInst>(&AtomicInst);

  if (((RMW && RMW->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS) ||
       (CmpX &&
        CmpX->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS)) &&

    llvm::MDNode *ASRange = MDHelper.createRange(
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS),
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS + 1));
    AtomicInst.setMetadata(llvm::LLVMContext::MD_noalias_addrspace, ASRange);

    RMW->setMetadata("amdgpu.no.fine.grained.memory", Empty);

    RMW->setMetadata("amdgpu.no.remote.memory", Empty);

      RMW->getOperation() == llvm::AtomicRMWInst::FAdd &&
      RMW->getType()->isFloatTy())
    RMW->setMetadata("amdgpu.ignore.denormal.mode", Empty);
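// Atomic RMW/cmpxchg on flat pointers get !noalias.addrspace metadata
// excluding the private address space; RMW operations additionally receive
// amdgpu.no.fine.grained.memory / amdgpu.no.remote.memory, and float fadd
// also amdgpu.ignore.denormal.mode, depending on the atomic options in effect.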
bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}

bool AMDGPUTargetCodeGenInfo::shouldEmitDWARFBitFieldSeparators() const {
  return true;
}

void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
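// The CUDA/HIP kernel's function type is rewritten so that the device-kernel
// calling convention becomes part of the type itself.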
static llvm::StructType *
getAMDGPURuntimeHandleType(llvm::LLVMContext &C,
                           llvm::Type *KernelDescriptorPtrTy) {
  llvm::Type *Int32 = llvm::Type::getInt32Ty(C);
  return llvm::StructType::create(C, {KernelDescriptorPtrTy, Int32, Int32},
                                  "block.runtime.handle.t");
}
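// This mirrors the rtinfo struct used by rocm-device-libs for device-side
// enqueue: a pointer to the kernel descriptor followed by two i32 fields.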
llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  llvm::Module &Mod = CGF.CGM.getModule();
  const llvm::DataLayout &DL = Mod.getDataLayout();

  llvm::Twine Name = Invoke->getName() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);

  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &Mod);
  F->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);

  llvm::AttrBuilder KernelAttrs(C);

  F->addFnAttrs(KernelAttrs);

  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  const auto BlockAlign = DL.getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (llvm::Argument &A : llvm::drop_begin(F->args()))
    Args.push_back(&A);
  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));

  F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  llvm::StructType *HandleTy = getAMDGPURuntimeHandleType(
      C, llvm::PointerType::get(C, DL.getDefaultGlobalsAddressSpace()));
  llvm::Constant *RuntimeHandleInitializer =
      llvm::ConstantAggregateZero::get(HandleTy);

  llvm::Twine RuntimeHandleName = F->getName() + ".runtime.handle";

  auto *RuntimeHandle = new llvm::GlobalVariable(
      Mod, HandleTy,
      true, llvm::GlobalValue::InternalLinkage,
      RuntimeHandleInitializer, RuntimeHandleName,
      nullptr, llvm::GlobalValue::NotThreadLocal,
      DL.getDefaultGlobalsAddressSpace(),

  llvm::MDNode *HandleAsMD =
      llvm::MDNode::get(C, llvm::ValueAsMetadata::get(RuntimeHandle));
  F->setMetadata(llvm::LLVMContext::MD_associated, HandleAsMD);

  RuntimeHandle->setSection(".amdgpu.kernel.runtime.handle");

  return RuntimeHandle;
}
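// The generated wrapper is an amdgpu_kernel that copies its by-value block
// argument into an alloca, calls the original block invoke function, and is
// tied (via !associated metadata and the .amdgpu.kernel.runtime.handle
// section) to a zero-initialized runtime-handle global that the runtime
// fills in for device-side enqueue. Its OpenCL kernel-argument metadata
// describes the block literal plus the captured local pointers ("local_arg<N>").

// The two CodeGenModule helpers below encode the AMDGPU launch-bounds
// attributes: the flat work-group size as "min,max" (a reqd_work_group_size
// of X,Y,Z pins both ends to X*Y*Z), and waves-per-EU as "min" or "min,max".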
void CodeGenModule::handleAMDGPUFlatWorkGroupSizeAttr(
    llvm::Function *F, const AMDGPUFlatWorkGroupSizeAttr *FlatWGS,
    const ReqdWorkGroupSizeAttr *ReqdWGS, int32_t *MinThreadsVal,
    int32_t *MaxThreadsVal) {
  unsigned Min = 0;
  unsigned Max = 0;
  auto Eval = [&](Expr *E) {
    return E->EvaluateKnownConstInt(getContext()).getExtValue();
  };
  if (FlatWGS) {
    Min = Eval(FlatWGS->getMin());
    Max = Eval(FlatWGS->getMax());
  }
  if (ReqdWGS && Min == 0 && Max == 0)
    Min = Max = Eval(ReqdWGS->getXDim()) * Eval(ReqdWGS->getYDim()) *
                Eval(ReqdWGS->getZDim());

  if (Min != 0) {
    assert(Min <= Max && "Min must be less than or equal Max");

    if (MinThreadsVal)
      *MinThreadsVal = Min;
    if (MaxThreadsVal)
      *MaxThreadsVal = Max;
    std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
    if (F)
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}
void CodeGenModule::handleAMDGPUWavesPerEUAttr(
    llvm::Function *F, const AMDGPUWavesPerEUAttr *Attr) {
  unsigned Min =
      Attr->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
  unsigned Max =
      Attr->getMax()
          ? Attr->getMax()->EvaluateKnownConstInt(getContext()).getExtValue()
          : 0;

  if (Min != 0) {
    assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

    std::string AttrVal = llvm::utostr(Min);
    if (Max != 0)
      AttrVal = AttrVal + "," + llvm::utostr(Max);
    F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<AMDGPUTargetCodeGenInfo>(CGM.getTypes());
}