#include "ABIInfoImpl.h"
#include "HLSLBufferLayoutBuilder.h"
#include "TargetInfo.h"
// ...

using namespace clang;
using namespace clang::CodeGen;
namespace {
class CommonSPIRABIInfo : public DefaultABIInfo {
public:
  CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }

private:
  void setCCs();
};

class SPIRVABIInfo : public CommonSPIRABIInfo {
public:
  SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
  void computeInfo(CGFunctionInfo &FI) const override;

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;
};
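// CommonSPIRABIInfo only swaps the runtime calling convention to SPIR_FUNC
// (see setCCs below); SPIRVABIInfo additionally overrides computeInfo so that
// arguments of SPIR_KERNEL functions are classified by
// classifyKernelArgumentType rather than by the default argument classifier.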
class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
  CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
      : TargetCodeGenInfo(std::move(ABIInfo)) {}

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }

  unsigned getDeviceKernelCallingConv() const override;
  llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override;
  llvm::Type *
  getHLSLType(CodeGenModule &CGM, const Type *Ty,
              const SmallVector<int32_t> *Packoffsets = nullptr) const override;
  llvm::Type *getSPIRVImageTypeFromHLSLResource(
      const HLSLAttributedResourceType::Attributes &attributes,
      QualType SampledType, CodeGenModule &CGM) const;
  void setOCLKernelStubCallingConvention(const FunctionType *&FT) const override;
  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *PT,
                                 QualType QT) const override;
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
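// The getOpenCLType/getHLSLType hooks let CodeGenTypes lower OpenCL and HLSL
// builtin types to SPIR-V target extension types (llvm::TargetExtType such as
// "spirv.Image" or "spirv.Sampler") instead of opaque structs; the concrete
// mappings are implemented further down in this file.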
class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
public:
  SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}

  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  bool supportsLibCall() const override {
    return getABIInfo().getTarget().getTriple().getVendor() !=
           llvm::Triple::AMD;
  }
};
} // end anonymous namespace

// ...
// mapClangSyncScopeToLLVM(Scope): maps each Clang sync scope to the matching
// LLVM sync scope name, e.g.
//   return "singlethread";
// ...
void CommonSPIRABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);
  RuntimeCC = llvm::CallingConv::SPIR_FUNC;
}
ABIArgInfo SPIRVABIInfo::classifyReturnType(QualType RetTy) const {
  if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return DefaultABIInfo::classifyReturnType(RetTy);
  // ...
}
ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
  if (getContext().getLangOpts().isTargetDevice()) {
    // Coerce pointer kernel arguments that are in the default address space
    // to the global (CrossWorkgroup) address space.
    llvm::Type *LTy = CGT.ConvertType(Ty);
    auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
    auto GlobalAS = getContext().getTargetAddressSpace(LangAS::opencl_global);
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
    if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
      LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
      // ...
    }
  }

  // ...
  if (getTarget().getTriple().getVendor() == llvm::Triple::AMD) {
    // ...
  }
  // Force aggregates passed to kernels to be copied by value.
  return getNaturalAlignIndirect(Ty, /*AddrSpace=*/0, /*ByVal=*/true);
  // ...
}
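// For device-side compilation (isTargetDevice(), e.g. HIP or OpenMP offload
// via SPIR-V), a kernel parameter declared as a plain `T *` is coerced above
// into the global (CrossWorkgroup) address space, so the kernel signature
// carries a `ptr addrspace(1)` argument instead of a generic pointer.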
ABIArgInfo SPIRVABIInfo::classifyArgumentType(QualType Ty) const {
  if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return DefaultABIInfo::classifyArgumentType(Ty);
  // ...
  // RAA is the record-argument ABI (getRecordArgABI) computed for Ty; records
  // that must not be passed by value go indirect in the alloca address space.
  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                 RAA == CGCXXABI::RAA_DirectInMemory);
  // ...
}
void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // Same as DefaultABIInfo::computeInfo, except that SPIR_KERNEL arguments
  // get the kernel-specific classification.
  unsigned CC = FI.getCallingConvention();
  // ...
  for (auto &I : FI.arguments()) {
    if (CC == llvm::CallingConv::SPIR_KERNEL) {
      I.info = classifyKernelArgumentType(I.type);
    } else {
      I.info = classifyArgumentType(I.type);
    }
  }
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  if (CGM.getTarget().getTriple().isSPIRV())
    SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
  else
    CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
}
} // namespace CodeGen
} // namespace clang
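// computeSPIRKernelABIInfo dispatches on the triple: SPIR-V targets use the
// kernel-aware SPIRVABIInfo, while other SPIR targets fall back to
// CommonSPIRABIInfo's default classification.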
unsigned CommonSPIRTargetCodeGenInfo::getDeviceKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}

void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  // Convert HIP kernels to SPIR-V kernels.
  if (getABIInfo().getContext().getLangOpts().HIP) {
    FT = getABIInfo().getContext().adjustFunctionType(
        FT, FT->getExtInfo().withCallingConv(CC_DeviceKernel));
    return;
  }
}
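// Under HIP targeting SPIR-V, __global__ kernels get their function type
// rewritten to the device-kernel calling convention here; that convention in
// turn maps to llvm::CallingConv::SPIR_KERNEL via getDeviceKernelCallingConv
// above.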
void CommonSPIRTargetCodeGenInfo::setOCLKernelStubCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_SpirFunction));
}
llvm::Constant *CommonSPIRTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  LangAS AS = QT->getUnqualifiedDesugaredType()->isNullPtrType()
                  ? LangAS::Default
                  : QT->getPointeeType().getAddressSpace();
  auto &Ctx = CGM.getContext();
  if (AS == LangAS::Default || AS == LangAS::opencl_generic ||
      AS == LangAS::opencl_constant)
    return llvm::ConstantPointerNull::get(PT);

  // For other address spaces, null is a generic-address-space null pointer
  // cast into the pointer's address space.
  auto NPT = llvm::PointerType::get(
      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
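// For example, a null pointer in the OpenCL local address space is emitted
// roughly as
//   addrspacecast (ptr addrspace(4) null to ptr addrspace(3))
// where addrspace(4) is the SPIR/SPIR-V generic address space; a plain
// zero-valued null is used only for the generic, constant and default address
// spaces per the check above.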
void CommonSPIRTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  // ...
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  assert(F && "Expected GlobalValue to be a Function");

  if (FD->hasAttr<DeviceKernelAttr>())
    F->setCallingConv(getDeviceKernelCallingConv());
}
LangAS
SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                 const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  // DefaultGlobalAS is the language address space that maps to the target's
  // global address space.
  // ...
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  return DefaultGlobalAS;
}
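// Globals declared without an explicit address-space qualifier therefore land
// in the default global address space, while explicitly qualified globals keep
// their declared address space.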
void SPIRVTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  assert(F && "Expected GlobalValue to be a Function");

  if (FD->hasAttr<DeviceKernelAttr>())
    F->setCallingConv(getDeviceKernelCallingConv());

  // ...
  if (!FD->hasAttr<CUDAGlobalAttr>())
    return;

  // Encode the kernel's maximum work-group size as function metadata.
  unsigned N = M.getLangOpts().GPUMaxThreadsPerBlock;
  if (auto FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>())
    N = FlatWGS->getMax()->EvaluateKnownConstInt(M.getContext()).getExtValue();

  auto Int32Ty = llvm::IntegerType::getInt32Ty(M.getLLVMContext());
  llvm::Metadata *AttrMDArgs[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, N)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1))};

  F->setMetadata("max_work_group_size",
                 llvm::MDNode::get(M.getLLVMContext(), AttrMDArgs));
}
llvm::SyncScope::ID
SPIRVTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &, SyncScope Scope,
                                           llvm::AtomicOrdering,
                                           llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(mapClangSyncScopeToLLVM(Scope));
}
/// Construct a SPIR-V target extension type for the given OpenCL image type.
static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType,
                                     StringRef OpenCLName,
                                     unsigned AccessQualifier) {
  // The integer parameters mirror the operands of OpTypeImage
  // (Dim, Depth, Arrayed, MS, Sampled, Image Format), with the access
  // qualifier appended as a final parameter.
  SmallVector<unsigned, 7> IntParams = {0, 0, 0, 0, 0, 0};

  // Dim
  if (OpenCLName.starts_with("image2d"))
    IntParams[0] = 1; // 2D
  else if (OpenCLName.starts_with("image3d"))
    IntParams[0] = 2; // 3D
  else if (OpenCLName == "image1d_buffer")
    IntParams[0] = 5; // Buffer
  else
    assert(OpenCLName.starts_with("image1d") && "Unknown image type");

  // Depth, Arrayed and MS
  if (OpenCLName.contains("_depth"))
    IntParams[1] = 1;
  if (OpenCLName.contains("_array"))
    IntParams[2] = 1;
  if (OpenCLName.contains("_msaa"))
    IntParams[3] = 1;

  // Access qualifier
  IntParams.push_back(AccessQualifier);

  return llvm::TargetExtType::get(Ctx, BaseType, {llvm::Type::getVoidTy(Ctx)},
                                  IntParams);
}
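// For example, a read_only image2d_array_t becomes roughly
//   target("spirv.Image", void, 1, 0, 1, 0, 0, 0, 0)
// (Dim = 2D, Arrayed = 1, all remaining operands and the read-only access
// qualifier are 0).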
llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(CodeGenModule &CGM,
                                                       const Type *Ty) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  if (auto *PipeTy = dyn_cast<PipeType>(Ty))
    return llvm::TargetExtType::get(Ctx, "spirv.Pipe", {},
                                    {!PipeTy->isReadOnly()});
  if (auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
    enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
    switch (BuiltinTy->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:                                                        \
    return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
#include "clang/Basic/OpenCLImageTypes.def"
    case BuiltinType::OCLSampler:
      return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
    case BuiltinType::OCLEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.Event");
    case BuiltinType::OCLClkEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.DeviceEvent");
    case BuiltinType::OCLQueue:
      return llvm::TargetExtType::get(Ctx, "spirv.Queue");
    case BuiltinType::OCLReserveID:
      return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
#define INTEL_SUBGROUP_AVC_TYPE(Name, Id)                                      \
  case BuiltinType::OCLIntelSubgroupAVC##Id:                                   \
    return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
#include "clang/Basic/OpenCLExtensionTypes.def"
    default:
      return nullptr;
    }
  }
  return nullptr;
}
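// The IMAGE_TYPE and INTEL_SUBGROUP_AVC_TYPE x-macros expand to one case per
// builtin listed in OpenCLImageTypes.def / OpenCLExtensionTypes.def, so every
// OpenCL image type maps onto "spirv.Image" with its access qualifier, and the
// Intel subgroup AVC types map onto the corresponding "spirv.Avc*INTEL" types.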
static llvm::Type *getInlineSpirvConstant(CodeGenModule &CGM,
                                          llvm::Type *IntegralType,
                                          llvm::APInt Value) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Split the constant into 32-bit words, least significant word first.
  llvm::SmallVector<uint32_t> Words;
  while (Value.ugt(0)) {
    uint32_t Word = Value.trunc(32).getZExtValue();
    Value.lshrInPlace(32);
    Words.push_back(Word);
  }
  if (Words.size() == 0)
    Words.push_back(0);

  // A typed constant becomes spirv.IntegralConstant; an untyped one becomes a
  // spirv.Literal.
  if (IntegralType)
    return llvm::TargetExtType::get(Ctx, "spirv.IntegralConstant",
                                    {IntegralType}, Words);
  return llvm::TargetExtType::get(Ctx, "spirv.Literal", {}, Words);
}
static llvm::Type *getInlineSpirvType(CodeGenModule &CGM,
                                      const HLSLInlineSpirvType *SpirvType) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  llvm::SmallVector<llvm::Type *> Operands;

  for (auto &Operand : SpirvType->getOperands()) {
    using SpirvOperandKind = SpirvOperand::SpirvOperandKind;

    llvm::Type *Result = nullptr;
    switch (Operand.getKind()) {
    case SpirvOperandKind::ConstantId: {
      llvm::Type *IntegralType =
          CGM.getTypes().ConvertType(Operand.getResultType());
      // ...
      break;
    }
    case SpirvOperandKind::Literal: {
      // ...
      break;
    }
    case SpirvOperandKind::TypeId: {
      QualType TypeOperand = Operand.getResultType();
      if (RecordDecl *RD = TypeOperand->getAsRecordDecl()) {
        assert(RD->isCompleteDefinition() &&
               "Type completion should have been required in Sema");

        // If the operand is a resource wrapper, use the handle's type instead.
        const FieldDecl *HandleField = RD->findFirstNamedDataMember();
        if (HandleField) {
          QualType ResourceType = HandleField->getType();
          if (ResourceType->getAs<HLSLAttributedResourceType>()) {
            TypeOperand = ResourceType;
          }
        }
      }
      // ...
      break;
    }
    default:
      llvm_unreachable("HLSLInlineSpirvType had invalid operand!");
    }

    // ...
    Operands.push_back(Result);
  }

  return llvm::TargetExtType::get(Ctx, "spirv.Type", Operands,
                                  {SpirvType->getOpcode(), SpirvType->getSize(),
                                   SpirvType->getAlignment()});
}
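// This appears to be the lowering for HLSL's inline SPIR-V types
// (vk::SpirvType and related): the whole type becomes
//   target("spirv.Type", <operand types>, opcode, size, alignment)
// with constant and literal operands wrapped by getInlineSpirvConstant above.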
llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
    CodeGenModule &CGM, const Type *Ty,
    const SmallVector<int32_t> *Packoffsets) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  if (auto *SpirvType = dyn_cast<HLSLInlineSpirvType>(Ty))
    return getInlineSpirvType(CGM, SpirvType);

  auto *ResType = dyn_cast<HLSLAttributedResourceType>(Ty);
  if (!ResType)
    return nullptr;

  const HLSLAttributedResourceType::Attributes &ResAttrs = ResType->getAttrs();
  switch (ResAttrs.ResourceClass) {
  case llvm::dxil::ResourceClass::UAV:
  case llvm::dxil::ResourceClass::SRV: {
    // Typed buffers and raw buffers both need an element type.
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull())
      return nullptr;

    assert(!ResAttrs.IsROV &&
           "Rasterizer order views not implemented for SPIR-V yet");

    if (!ResAttrs.RawBuffer) {
      // Typed buffers become SPIR-V image types.
      return getSPIRVImageTypeFromHLSLResource(ResAttrs, ContainedTy, CGM);
    }

    if (ResAttrs.IsCounter) {
      // Counter buffers hold a single i32 counter.
      llvm::Type *ElemType = llvm::Type::getInt32Ty(Ctx);
      return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {ElemType},
                                      // ... (storage class, writability)
    }

    // Raw (structured/byte-address) buffers wrap a runtime array of the
    // element type; only UAVs are writable.
    llvm::Type *ElemType = CGM.getTypes().ConvertTypeForMem(ContainedTy);
    llvm::ArrayType *RuntimeArrayType = llvm::ArrayType::get(ElemType, 0);
    bool IsWritable = ResAttrs.ResourceClass == llvm::dxil::ResourceClass::UAV;
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer",
                                    {RuntimeArrayType},
                                    // ... (storage class, IsWritable)
  }
  case llvm::dxil::ResourceClass::CBuffer: {
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull() || !ContainedTy->isStructureType())
      return nullptr;

    llvm::Type *BufferLayoutTy =
        HLSLBufferLayoutBuilder(CGM, "spirv.Layout")
            .createLayoutType(ContainedTy->castAsCanonical<RecordType>(),
                              Packoffsets);
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {BufferLayoutTy},
                                    // ... (Uniform storage class, read-only)
  }
  case llvm::dxil::ResourceClass::Sampler:
    return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
  }
  // ...
}
static unsigned
getImageFormat(const LangOptions &LangOpts,
               const HLSLAttributedResourceType::Attributes &attributes,
               llvm::Type *SampledType, QualType Ty, unsigned NumChannels) {
  // Use the Unknown image format unless the resource is a UAV and the
  // "use unknown image format" option is off.
  if (LangOpts.HLSLSpvUseUnknownImageFormat ||
      attributes.ResourceClass != llvm::dxil::ResourceClass::UAV) {
    return 0; // Unknown image format
  }

  // Otherwise pick a concrete SPIR-V ImageFormat from the sampled type, its
  // signedness and the channel count (the specific formats are elided below).
  if (SampledType->isIntegerTy(32)) {
    if (Ty->isSignedIntegerType()) {
      if (NumChannels == 1)
        // ...
      if (NumChannels == 2)
        // ...
      if (NumChannels == 4)
        // ...
    } else {
      if (NumChannels == 1)
        // ...
      if (NumChannels == 2)
        // ...
      if (NumChannels == 4)
        // ...
    }
  } else if (SampledType->isIntegerTy(64)) {
    if (NumChannels == 1) {
      // ...
    }
    // ...
  } else if (SampledType->isFloatTy()) {
    if (NumChannels == 1)
      // ...
    if (NumChannels == 2)
      // ...
    if (NumChannels == 4)
      // ...
  }
  // ...
}
llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource(
    const HLSLAttributedResourceType::Attributes &attributes, QualType Ty,
    CodeGenModule &CGM) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Peel off a vector type: the image's sampled type is the scalar element
  // type, and the vector width becomes the channel count.
  unsigned NumChannels = 1;
  if (const VectorType *V = dyn_cast<VectorType>(Ty)) {
    NumChannels = V->getNumElements();
    Ty = V->getElementType();
  }
  assert(!Ty->isVectorType() && "We still have a vector type.");

  llvm::Type *SampledType = CGM.getTypes().ConvertType(Ty);
  assert((SampledType->isIntegerTy() || SampledType->isFloatingPointTy()) &&
         "The element type for a SPIR-V resource must be a scalar integer or "
         "floating point type.");

  // The six integer parameters mirror the OpTypeImage operands
  // (Dim, Depth, Arrayed, MS, Sampled, Image Format).
  SmallVector<unsigned, 6> IntParams(6, 0);
  // ... (Name selection and the Dim/Depth/Arrayed/MS operands)

  // Sampled: 2 (storage/read-write) for UAVs, 1 otherwise.
  IntParams[4] =
      attributes.ResourceClass == llvm::dxil::ResourceClass::UAV ? 2 : 1;
  // ... (Image Format via getImageFormat above)

  llvm::TargetExtType *ImageType =
      llvm::TargetExtType::get(Ctx, Name, {SampledType}, IntParams);
  return ImageType;
}
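// The result is a target extension type of the form
//   target(<"spirv.Image"-like name>, <scalar sampled type>, Dim, Depth,
//          Arrayed, MS, Sampled, Format)
// which, together with getImageFormat above, is how typed HLSL resources such
// as Buffer<T> and RWBuffer<T> get their SPIR-V image representation.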
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<CommonSPIRTargetCodeGenInfo>(CGM.getTypes());
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createSPIRVTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<SPIRVTargetCodeGenInfo>(CGM.getTypes());
}