#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/AMDGPUAddrSpace.h"

using namespace clang;
using namespace clang::CodeGen;
namespace {

class AMDGPUABIInfo final : public DefaultABIInfo {
private:
  static const unsigned MaxNumRegsForArgsRet = 16;

  uint64_t numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;

  // Coerce HIP scalar pointer arguments from generic pointers to global ones.
  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const {
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
    if (PtrTy && PtrTy->getAddressSpace() == FromAS)
      return llvm::PointerType::get(Ty->getContext(), ToAS);
    return Ty;
  }
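  // Illustrative only (not part of the upstream file): under HIP, a kernel
  // parameter declared as `float *p` is rewritten from the generic address
  // space to the global one, so the kernel signature carries
  // `ptr addrspace(1)` instead of a flat `ptr`. Assuming amdgcn's usual
  // numbering (generic = 0, global = 1):
  //
  //   coerceKernelArgumentType(PtrTy /*AS 0*/, /*FromAS=*/0, /*ToAS=*/1)
  //     -> ptr addrspace(1)
  //   coerceKernelArgumentType(I32Ty, 0, 1) -> i32  (non-pointers unchanged)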

public:
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, bool Variadic,
                                  unsigned &NumRegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  llvm::FixedVectorType *
  getOptimalVectorMemoryType(llvm::FixedVectorType *T,
                             const LangOptions &Opt) const override {
    // 96-bit loads/stores are legal, so 3 x 32-bit vectors can be used as-is.
    if (T->getNumElements() == 3 && getDataLayout().getTypeSizeInBits(T) == 96)
      return T;
    return DefaultABIInfo::getOptimalVectorMemoryType(T, Opt);
  }
};

bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}

bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                      uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  // Homogeneous aggregates may occupy at most 16 registers.
  return Members * NumRegs <= MaxNumRegsForArgsRet;
}
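// Worked example (illustrative, not upstream code): a homogeneous aggregate
// with a float base needs (32 + 31) / 32 == 1 register per member, so up to
// 16 members fit the MaxNumRegsForArgsRet budget; with a double base
// (2 registers per member) only 8 members qualify.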

/// Estimate the number of 32-bit registers the type will use when passed in
/// registers.
uint64_t AMDGPUABIInfo::numRegsForType(QualType Ty) const {
  uint64_t NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Compute from the number of elements. The reported size is based on the
    // in-memory size, which includes the padding 4th element for 3-vectors.
    QualType EltTy = VT->getElementType();
    uint64_t EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors should be passed packed.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    uint64_t EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember());

    for (const FieldDecl *Field : RD->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }

    return NumRegs;
  }

  return (getContext().getTypeSize(Ty) + 31) / 32;
}
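// Worked examples for the arithmetic above (illustrative, not upstream code):
//   double                       -> (64 + 31) / 32             == 2 registers
//   half4 (4 x f16)              -> packed: (4 + 1) / 2        == 2 registers
//   float3                       -> 1 register per element * 3 == 3 registers
//   struct {float a; double b;}  -> 1 + 2                      == 3 registers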

void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned ArgumentIndex = 0;
  const unsigned numFixedArguments = FI.getNumRequiredArgs();

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      bool FixedArgument = ArgumentIndex++ < numFixedArguments;
      Arg.info = classifyArgumentType(Arg.type, !FixedArgument, NumRegsLeft);
    }
  }
}
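// Example of the budget in action (illustrative): for a device function
// `void f(int4 a, double b, float c)`, classification sees NumRegsLeft go
// 16 -> 12 (int4) -> 10 (double) -> 9 (float). Kernel arguments take the
// classifyKernelArgumentType path and never touch the budget.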

RValue AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  const bool IsIndirect = false;
  const bool AllowHigherAlign = false;
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4), AllowHigherAlign, Slot);
}
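// Note (illustrative): every va_arg slot here is 4 bytes wide and 4-byte
// aligned (CharUnits::fromQuantity(4)), and AllowHigherAlign == false means
// even naturally 8-byte-aligned types such as double are read from a
// 4-byte-aligned slot rather than being realigned upward.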

ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy) && !getRecordArgABI(RetTy, getCXXABI())) {
    // Pack small aggregates directly into registers: <= 16 bits as i16,
    // <= 32 bits as i32, <= 64 bits as a pair of i32s.
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    if (Size <= 64) {
      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
    }

    if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
      return ABIArgInfo::getDirect();
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}

/// For kernels all parameters are really passed in a special buffer. It
/// doesn't make sense to pass anything byval, so everything must be direct.
ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Lower single-element structs to just pass the element.
  if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(SeltTy, 0);

  llvm::Type *OrigLTy = CGT.ConvertType(Ty);
  llvm::Type *LTy = OrigLTy;
  if (getContext().getLangOpts().HIP) {
    LTy = coerceKernelArgumentType(
        OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
        /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
  }

  // Aggregates that were not coerced are passed byref in the constant address
  // space instead of byval.
  if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
      isAggregateTypeForABI(Ty)) {
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_constant),
        false /*Realign*/, nullptr /*Padding*/);
  }

  // CanBeFlattened must be false so CodeGen does not expand the struct into
  // its individual elements.
  return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
}
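// End-to-end example (illustrative): for a HIP kernel
//   __global__ void k(float *p, SomeAggregate s);
// `p` is lowered as `ptr addrspace(1)` (coerced above), while a non-coerced
// aggregate `s` is passed byref in the constant address space (AS 4 on
// amdgcn), matching how the hardware exposes preloaded kernarg memory.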

ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");

  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Variadic)
    return ABIArgInfo::getDirect(/*T=*/nullptr, /*Offset=*/0,
                                 /*Padding=*/nullptr,
                                 /*CanBeFlattened=*/false, /*Align=*/0);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                     RAA == CGCXXABI::RAA_DirectInMemory);

    // Pack aggregates <= 8 bytes into a single VGPR or a pair.
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64) {
      unsigned NumRegs = (Size + 31) / 32;
      NumRegsLeft -= std::min(NumRegsLeft, NumRegs);

      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
    }

    if (NumRegsLeft > 0) {
      uint64_t NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;
        return ABIArgInfo::getDirect();
      }
    }

    // Use pass-by-reference instead of pass-by-value for struct arguments.
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_private));
  }

  // Otherwise do the default thing, but keep the register budget honest.
  ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
  if (!ArgInfo.isIndirect()) {
    uint64_t NumRegs = numRegsForType(Ty);
    NumRegsLeft -= std::min(NumRegs, uint64_t{NumRegsLeft});
  }
  return ArgInfo;
}
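// Example of budget depletion (illustrative): passing two structs that each
// need 10 registers, the first goes direct (budget 16 -> 6), but the second
// no longer fits (6 < 10) and falls through to the byref path in the private
// address space.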

class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}

  bool supportsLibCall() const override { return false; }
  void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
                                 CodeGenModule &CGM) const;

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getDeviceKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T,
                                 QualType QT) const override;

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  StringRef getLLVMSyncScopeStr(const LangOptions &LangOpts, SyncScope Scope,
                                llvm::AtomicOrdering Ordering) const override;
  void setTargetAtomicMetadata(CodeGenFunction &CGF,
                               llvm::Instruction &AtomicInst,
                               const AtomicExpr *Expr = nullptr) const override;
  llvm::Value *createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                         llvm::Function *BlockInvokeFunc,
                                         llvm::Type *BlockTy) const override;
  bool shouldEmitStaticExternCAliases() const override;
  bool shouldEmitDWARFBitFieldSeparators() const override;
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
};
} // anonymous namespace

static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return !D->hasAttr<OMPDeclareTargetDeclAttr>() &&
         (D->hasAttr<DeviceKernelAttr>() ||
          (isa<VarDecl>(D) &&
           (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())));
}

void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
    const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
  const auto *ReqdWGS =
      M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
  const bool IsOpenCLKernel =
      M.getLangOpts().OpenCL && FD->hasAttr<DeviceKernelAttr>();
  const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    M.handleAMDGPUFlatWorkGroupSizeAttr(F, FlatWGS, ReqdWGS);
  } else if (IsOpenCLKernel || IsHIPKernel) {
    // By default, restrict the maximum size to a value specified by
    // --gpu-max-threads-per-block=n or its default value for HIP.
    const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
    const unsigned DefaultMaxWorkGroupSize =
        IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
                       : M.getLangOpts().GPUMaxThreadsPerBlock;
    std::string AttrVal =
        std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  }

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>())
    M.handleAMDGPUWavesPerEUAttr(F, Attr);

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();
    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();
    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUMaxNumWorkGroupsAttr>()) {
    uint32_t X = Attr->getMaxNumWorkGroupsX()
                     ->EvaluateKnownConstInt(M.getContext())
                     .getExtValue();
    // Y and Z dimensions default to 1 if not specified.
    uint32_t Y = Attr->getMaxNumWorkGroupsY()
                     ? Attr->getMaxNumWorkGroupsY()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;
    uint32_t Z = Attr->getMaxNumWorkGroupsZ()
                     ? Attr->getMaxNumWorkGroupsZ()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;

    llvm::SmallString<32> AttrVal;
    llvm::raw_svector_ostream OS(AttrVal);
    OS << X << ',' << Y << ',' << Z;

    F->addFnAttr("amdgpu-max-num-workgroups", AttrVal.str());
  }

  if (auto *Attr = FD->getAttr<CUDAClusterDimsAttr>()) {
    auto GetExprVal = [&](const auto &E) {
      return E ? E->EvaluateKnownConstInt(M.getContext()).getExtValue() : 1;
    };
    unsigned X = GetExprVal(Attr->getX());
    unsigned Y = GetExprVal(Attr->getY());
    unsigned Z = GetExprVal(Attr->getZ());
    llvm::SmallString<32> AttrVal;
    llvm::raw_svector_ostream OS(AttrVal);
    OS << X << ',' << Y << ',' << Z;
    F->addFnAttr("amdgpu-cluster-dims", AttrVal.str());
  }

  // OpenCL kernels cannot launch clusters, and no_cluster disables them
  // explicitly; both cases pin the attribute to "no cluster".
  if ((IsOpenCLKernel &&
       M.getTarget().hasFeatureEnabled(M.getTarget().getTargetOpts().FeatureMap,
                                       "clusters")) ||
      FD->hasAttr<CUDANoClusterAttr>())
    F->addFnAttr("amdgpu-cluster-dims", "0,0,0");
}

void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  if (!F)
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD)
    setFunctionDeclAttributes(FD, F, M);

  if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
    F->addFnAttr("amdgpu-ieee", "false");
  if (getABIInfo().getCodeGenOpts().AMDGPUExpandWaitcntProfiling)
    F->addFnAttr("amdgpu-expand-waitcnt-profiling");
}

unsigned AMDGPUTargetCodeGenInfo::getDeviceKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}

llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  auto &Ctx = CGM.getContext();
  if (PT->getAddressSpace() != Ctx.getTargetAddressSpace(LangAS::opencl_local))
    return llvm::ConstantPointerNull::get(PT);

  auto NPT = llvm::PointerType::get(
      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
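// Example (illustrative): on amdgcn the null pointer in the local address
// space is not the all-zero bit pattern, so a local-AS null constant is
// emitted as an addrspacecast of the generic null pointer:
//   addrspacecast (ptr null to ptr addrspace(3))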

LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  // Only promote to the constant address space if the variable has constant
  // initialization.
  if (D->getType().isConstantStorage(CGM.getContext(), false, false) &&
      D->hasConstantInitialization()) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return *ConstAS;
  }
  return DefaultGlobalAS;
}

StringRef AMDGPUTargetCodeGenInfo::getLLVMSyncScopeStr(
    const LangOptions &LangOpts, SyncScope Scope,
    llvm::AtomicOrdering Ordering) const {
  // OpenCL atomics that are not sequentially consistent only order within a
  // single address space, which maps to the "one-as" sync scope variants.
  bool IsOneAs = (Scope >= SyncScope::OpenCLWorkGroup &&
                  Scope <= SyncScope::OpenCLSubGroup &&
                  Ordering != llvm::AtomicOrdering::SequentiallyConsistent);

  switch (Scope) {
  case SyncScope::HIPSingleThread:
  case SyncScope::SingleScope:
    return IsOneAs ? "singlethread-one-as" : "singlethread";
  case SyncScope::HIPWavefront:
  case SyncScope::OpenCLSubGroup:
  case SyncScope::WavefrontScope:
    return IsOneAs ? "wavefront-one-as" : "wavefront";
  case SyncScope::HIPCluster:
  case SyncScope::ClusterScope:
    return IsOneAs ? "cluster-one-as" : "cluster";
  case SyncScope::HIPWorkgroup:
  case SyncScope::OpenCLWorkGroup:
  case SyncScope::WorkgroupScope:
    return IsOneAs ? "workgroup-one-as" : "workgroup";
  case SyncScope::HIPAgent:
  case SyncScope::OpenCLDevice:
  case SyncScope::DeviceScope:
    return IsOneAs ? "agent-one-as" : "agent";
  case SyncScope::SystemScope:
  case SyncScope::HIPSystem:
  case SyncScope::OpenCLAllSVMDevices:
    return IsOneAs ? "one-as" : "";
  }
  llvm_unreachable("Unknown SyncScope enum");
}

void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(
    CodeGenFunction &CGF, llvm::Instruction &AtomicInst,
    const AtomicExpr *AE) const {
  auto *RMW = dyn_cast<llvm::AtomicRMWInst>(&AtomicInst);
  auto *CmpX = dyn_cast<llvm::AtomicCmpXchgInst>(&AtomicInst);

  // OpenCL and old style HIP atomics consider atomics addressed to the
  // private address space to be undefined, so flat atomics can be marked as
  // not aliasing it.
  if (((RMW && RMW->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS) ||
       (CmpX &&
        CmpX->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS)) &&
      CGF.getLangOpts().threadPrivateMemoryAtomicsAreUndefined()) {
    llvm::MDBuilder MDHelper(CGF.getLLVMContext());
    llvm::MDNode *ASRange = MDHelper.createRange(
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS),
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS + 1));
    AtomicInst.setMetadata(llvm::LLVMContext::MD_noalias_addrspace, ASRange);
  }

  if (!RMW)
    return;

  AtomicOptions AO = CGF.CGM.getAtomicOpts();
  llvm::MDNode *Empty = llvm::MDNode::get(CGF.getLLVMContext(), {});
  if (AO.getOption(clang::AtomicOptionKind::NoFineGrainedMemory))
    RMW->setMetadata("amdgpu.no.fine.grained.memory", Empty);
  if (AO.getOption(clang::AtomicOptionKind::NoRemoteMemory))
    RMW->setMetadata("amdgpu.no.remote.memory", Empty);
  if (AO.getOption(clang::AtomicOptionKind::IgnoreDenormalMode) &&
      RMW->getOperation() == llvm::AtomicRMWInst::FAdd &&
      RMW->getType()->isFloatTy())
    RMW->setMetadata("amdgpu.ignore.denormal.mode", Empty);
}
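// Emitted IR (illustrative): with the fine-grained and remote memory atomic
// options disabled, a flat atomicrmw gains
//   !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
// (where !0 is the empty node `!{}`), and when private-memory atomics are
// undefined it also gets !noalias.addrspace excluding AS 5 (private).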

bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}

bool AMDGPUTargetCodeGenInfo::shouldEmitDWARFBitFieldSeparators() const {
  return true;
}

void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_DeviceKernel));
}

/// Return IR struct type for rtinfo struct in rocm-device-libs used for device
/// enqueue.
static llvm::StructType *
getAMDGPURuntimeHandleType(llvm::LLVMContext &C,
                           llvm::Type *KernelDescriptorPtrTy) {
  llvm::Type *Int32 = llvm::Type::getInt32Ty(C);
  return llvm::StructType::create(C, {KernelDescriptorPtrTy, Int32, Int32},
                                  "block.runtime.handle.t");
}

llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  // The remaining arguments are the enqueued local pointers (address
  // qualifier 3 == local).
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }

  llvm::Module &Mod = CGF.CGM.getModule();
  const llvm::DataLayout &DL = Mod.getDataLayout();

  llvm::Twine Name = Invoke->getName() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);

  // The kernel itself can be internal; the runtime reaches it only through
  // the kernel descriptor stored in the runtime handle.
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &Mod);
  F->setCallingConv(getDeviceKernelCallingConv());

  llvm::AttrBuilder KernelAttrs(C);
  CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
  F->addFnAttrs(KernelAttrs);

  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  const auto BlockAlign = DL.getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (llvm::Argument &A : llvm::drop_begin(F->args()))
    Args.push_back(&A);
  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  llvm::StructType *HandleTy = getAMDGPURuntimeHandleType(
      C, llvm::PointerType::get(C, DL.getDefaultGlobalsAddressSpace()));
  llvm::Constant *RuntimeHandleInitializer =
      llvm::ConstantAggregateZero::get(HandleTy);

  llvm::Twine RuntimeHandleName = F->getName() + ".runtime.handle";

  // The runtime needs access to the handle as an external symbol; it is made
  // external later, in AMDGPUExportOpenCLEnqueuedBlocks.
  auto *RuntimeHandle = new llvm::GlobalVariable(
      Mod, HandleTy,
      /*isConstant=*/true, llvm::GlobalValue::InternalLinkage,
      /*Initializer=*/RuntimeHandleInitializer, RuntimeHandleName,
      /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
      DL.getDefaultGlobalsAddressSpace(),
      /*isExternallyInitialized=*/true);

  llvm::MDNode *HandleAsMD =
      llvm::MDNode::get(C, llvm::ValueAsMetadata::get(RuntimeHandle));
  F->setMetadata(llvm::LLVMContext::MD_associated, HandleAsMD);

  RuntimeHandle->setSection(".amdgpu.kernel.runtime.handle");

  CGF.CGM.addUsedGlobal(F);
  CGF.CGM.addUsedGlobal(RuntimeHandle);
  return RuntimeHandle;
}
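// Net effect (illustrative): for a block invoke function `@__f_block_invoke`,
// this emits an internal AMDGPU_KERNEL `@__f_block_invoke_kernel` plus an
// externally-initialized global `@__f_block_invoke_kernel.runtime.handle` in
// section ".amdgpu.kernel.runtime.handle"; the kernel references the handle
// via !associated metadata and both are kept alive through llvm.used.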

void CodeGenModule::handleAMDGPUFlatWorkGroupSizeAttr(
    llvm::Function *F, const AMDGPUFlatWorkGroupSizeAttr *FlatWGS,
    const ReqdWorkGroupSizeAttr *ReqdWGS, int32_t *MinThreadsVal,
    int32_t *MaxThreadsVal) {
  unsigned Min = 0;
  unsigned Max = 0;
  auto Eval = [&](Expr *E) {
    return E->EvaluateKnownConstInt(getContext()).getExtValue();
  };
  if (FlatWGS) {
    Min = Eval(FlatWGS->getMin());
    Max = Eval(FlatWGS->getMax());
  }
  if (ReqdWGS && Min == 0 && Max == 0)
    Min = Max = Eval(ReqdWGS->getXDim()) * Eval(ReqdWGS->getYDim()) *
                Eval(ReqdWGS->getZDim());

  if (Min != 0) {
    assert(Min <= Max && "Min must be less than or equal Max");

    if (MinThreadsVal)
      *MinThreadsVal = Min;
    if (MaxThreadsVal)
      *MaxThreadsVal = Max;
    std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
    if (F)
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}

void CodeGenModule::handleAMDGPUWavesPerEUAttr(
    llvm::Function *F, const AMDGPUWavesPerEUAttr *Attr) {
  unsigned Min =
      Attr->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
  unsigned Max =
      Attr->getMax()
          ? Attr->getMax()->EvaluateKnownConstInt(getContext()).getExtValue()
          : 0;

  if (Min != 0) {
    assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

    std::string AttrVal = llvm::utostr(Min);
    if (Max != 0)
      AttrVal = AttrVal + "," + llvm::utostr(Max);
    F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<AMDGPUTargetCodeGenInfo>(CGM.getTypes());
}