11#include "TargetInfo.h"
class CommonSPIRABIInfo : public DefaultABIInfo {
public:
  CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }

private:
  void setCCs();
};

class SPIRVABIInfo : public CommonSPIRABIInfo {
public:
  SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
  void computeInfo(CGFunctionInfo &FI) const override;

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;
};
class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
  CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
      : TargetCodeGenInfo(std::move(ABIInfo)) {}

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }

  unsigned getDeviceKernelCallingConv() const override;
  llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override;
  llvm::Type *getHLSLType(CodeGenModule &CGM, const Type *Ty,
                          const CGHLSLOffsetInfo &OffsetInfo) const override;
  llvm::Type *getHLSLPadding(CodeGenModule &CGM,
                             CharUnits NumBytes) const override {
    unsigned Size = NumBytes.getQuantity();
    return llvm::TargetExtType::get(CGM.getLLVMContext(), "spirv.Padding", {},
                                    {Size});
  }
  bool isHLSLPadding(llvm::Type *Ty) const override {
    if (auto *TET = dyn_cast<llvm::TargetExtType>(Ty))
      return TET->getName() == "spirv.Padding";
    return false;
  }
  llvm::Type *getSPIRVImageTypeFromHLSLResource(
      const HLSLAttributedResourceType::Attributes &attributes,
      QualType SampledType, CodeGenModule &CGM) const;
  void setOCLKernelStubCallingConvention(
      const FunctionType *&FT) const override;
  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *PT,
                                 QualType QT) const override;
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
public:
  SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}

  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  bool supportsLibCall() const override {
    return getABIInfo().getTarget().getTriple().getVendor() !=
           llvm::Triple::AMD;
  }
};

// Maps a Clang SyncScope onto the LLVM sync-scope name consumed by
// getLLVMSyncScopeID below (excerpt; the remaining scope mappings are elided).
inline StringRef mapClangSyncScopeToLLVM(SyncScope Scope) {
  switch (Scope) {
  case SyncScope::SingleScope:
    return "singlethread";
  // ...
  }
}
void CommonSPIRABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);
  RuntimeCC = llvm::CallingConv::SPIR_FUNC;
}
ABIArgInfo SPIRVABIInfo::classifyReturnType(QualType RetTy) const {
  if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return DefaultABIInfo::classifyReturnType(RetTy);
  // ... (AMDGCN-flavoured SPIR-V return classification elided in this excerpt)
}
ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
  if (getContext().getLangOpts().isTargetDevice()) {
    // Coerce pointer kernel arguments in the default address space to the
    // global (CrossWorkgroup) address space.
    llvm::Type *LTy = CGT.ConvertType(Ty);
    auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
    auto GlobalAS = getContext().getTargetAddressSpace(LangAS::opencl_global);
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
    if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
      LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
      return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
    }
    // ... (vendor-specific handling elided, including the case where
    //      getTarget().getTriple().getVendor() == llvm::Triple::AMD)
  }
  // ... (aggregate kernel arguments fall back to an indirect, by-value pass)
  return getNaturalAlignIndirect(Ty, /*AddrSpace=*/0, /*ByVal=*/true);
}
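
// Illustrative example (not part of the original file): for a HIP kernel
//   __global__ void k(int *p);
// compiled for a SPIR-V device target, the default-address-space pointer
// parameter is coerced as above, so the kernel is emitted roughly as
//   define spir_kernel void @k(ptr addrspace(1) %p)
// assuming the usual SPIR/SPIR-V mapping of the global (CrossWorkgroup)
// address space to LLVM addrspace(1).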
ABIArgInfo SPIRVABIInfo::classifyArgumentType(QualType Ty) const {
  if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return DefaultABIInfo::classifyArgumentType(Ty);
  // ... (AMDGCN-flavoured SPIR-V handling; records that must not be passed
  //      by value go indirect in the alloca address space)
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   RAA == CGCXXABI::RAA_DirectInMemory);
  // ...
}
void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // Same as DefaultABIInfo, except that kernel arguments are classified by
  // classifyKernelArgumentType above.
  llvm::CallingConv::ID CC = FI.getCallingConvention();
  // ... (return type classification elided)
  for (auto &I : FI.arguments()) {
    if (CC == llvm::CallingConv::SPIR_KERNEL) {
      I.info = classifyKernelArgumentType(I.type);
    } else {
      I.info = classifyArgumentType(I.type);
    }
  }
}

void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  if (CGM.getTarget().getTriple().isSPIRV())
    SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
  else
    CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
}
unsigned CommonSPIRTargetCodeGenInfo::getDeviceKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}
void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  // Convert HIP kernels to SPIR-V kernels.
  if (getABIInfo().getContext().getLangOpts().HIP) {
    FT = getABIInfo().getContext().adjustFunctionType(
        FT, FT->getExtInfo().withCallingConv(CC_DeviceKernel));
    return;
  }
}
void CommonSPIRTargetCodeGenInfo::setOCLKernelStubCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_SpirFunction));
}
llvm::Constant *
CommonSPIRTargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
                                            llvm::PointerType *PT,
                                            QualType QT) const {
  LangAS AS = QT->getUnqualifiedDesugaredType()->isNullPtrType()
                  ? LangAS::Default
                  : QT->getPointeeType().getAddressSpace();
  unsigned ASAsInt = static_cast<unsigned>(AS);
  unsigned FirstTargetASAsInt =
      static_cast<unsigned>(LangAS::FirstTargetAddressSpace);
  unsigned CodeSectionINTELAS = FirstTargetASAsInt + 9;
  // Function pointers in the SPV_INTEL_function_pointers code section behave
  // like generic pointers for null-pointer purposes.
  bool IsFunctionPtrAS =
      CGM.getTriple().isSPIRV() && ASAsInt == CodeSectionINTELAS;
  if (AS == LangAS::Default || AS == LangAS::opencl_generic ||
      AS == LangAS::opencl_constant || IsFunctionPtrAS)
    return llvm::ConstantPointerNull::get(PT);

  // Otherwise, build a null pointer in the generic address space and
  // address-space cast it to the requested pointer type.
  auto &Ctx = CGM.getContext();
  auto NPT = llvm::PointerType::get(
      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
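
// Illustrative note (not from the original file): with the usual SPIR/SPIR-V
// address-space numbering (private=0, global=1, constant=2, local=3,
// generic=4), a null '__local int *' constant becomes
//   addrspacecast (ptr addrspace(4) null to ptr addrspace(3))
// while null default, generic and constant pointers stay plain 'null'.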
void CommonSPIRTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;

  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  assert(F && "Expected GlobalValue to be a Function");

  // Device kernels get the device-kernel calling convention (SPIR_KERNEL).
  if (FD->hasAttr<DeviceKernelAttr>())
    F->setCallingConv(getDeviceKernelCallingConv());
}
LangAS
SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                 const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  // With the SPIR default-is-generic address map, use sycl_global as the
  // default global address space.
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::sycl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  return DefaultGlobalAS;
}
void SPIRVTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  assert(F && "Expected GlobalValue to be a Function");

  if (FD->hasAttr<DeviceKernelAttr>())
    F->setCallingConv(getDeviceKernelCallingConv());

  // The rest applies to AMDGCN-flavoured SPIR-V HIP kernels only.
  if (!M.getLangOpts().HIP ||
      M.getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return;
  if (!FD->hasAttr<CUDAGlobalAttr>())
    return;

  unsigned N = M.getLangOpts().GPUMaxThreadsPerBlock;
  if (auto FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>())
    N = FlatWGS->getMax()->EvaluateKnownConstInt(M.getContext()).getExtValue();

  // Encode the maximum flat work-group size in the first component of the
  // 3-element max_work_group_size metadata.
  auto *Int32Ty = llvm::Type::getInt32Ty(M.getLLVMContext());
  llvm::Metadata *AttrMDArgs[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, N)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1))};
  F->setMetadata("max_work_group_size",
                 llvm::MDNode::get(M.getLLVMContext(), AttrMDArgs));
}
llvm::SyncScope::ID
SPIRVTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &, SyncScope Scope,
                                           llvm::AtomicOrdering,
                                           llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(mapClangSyncScopeToLLVM(Scope));
}
/// Construct a SPIR-V target extension type for the given OpenCL image type.
static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx,
                                     StringRef BaseType, StringRef OpenCLName,
                                     unsigned AccessQualifier) {
  // The integer parameters mirror the operands of OpTypeImage:
  // Dim, Depth, Arrayed, MS, Sampled, Format, plus the access qualifier.
  SmallVector<unsigned, 7> IntParams = {0, 0, 0, 0, 0, 0};

  // Choose the image dimensionality (the Dim operand).
  if (OpenCLName.starts_with("image2d"))
    IntParams[0] = 1; // 2D
  else if (OpenCLName.starts_with("image3d"))
    IntParams[0] = 2; // 3D
  else if (OpenCLName == "image1d_buffer")
    IntParams[0] = 5; // Buffer
  else
    assert(OpenCLName.starts_with("image1d") && "Unknown image type");

  // Set the remaining OpTypeImage operands that Clang's type spells out.
  if (OpenCLName.contains("_depth"))
    IntParams[1] = 1;
  if (OpenCLName.contains("_array"))
    IntParams[2] = 1;
  if (OpenCLName.contains("_msaa"))
    IntParams[3] = 1;

  // Access qualifier.
  IntParams.push_back(AccessQualifier);

  return llvm::TargetExtType::get(Ctx, BaseType, {llvm::Type::getVoidTy(Ctx)},
                                  IntParams);
}
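
// Illustrative note (not from the original file): assuming the encoding
// above, an OpenCL 'write_only image2d_array_t' would lower to a target
// extension type of the form
//   target("spirv.Image", void, 1 /*Dim=2D*/, 0 /*Depth*/, 1 /*Arrayed*/,
//          0 /*MS*/, 0 /*Sampled*/, 0 /*Format=Unknown*/, 1 /*write_only*/)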
llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(CodeGenModule &CGM,
                                                       const Type *Ty) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  if (auto *PipeTy = dyn_cast<PipeType>(Ty))
    return llvm::TargetExtType::get(Ctx, "spirv.Pipe", {},
                                    {!PipeTy->isReadOnly()});
  if (auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
    enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
    switch (BuiltinTy->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:                                                        \
    return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
#include "clang/Basic/OpenCLImageTypes.def"
    case BuiltinType::OCLSampler:
      return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
    case BuiltinType::OCLEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.Event");
    case BuiltinType::OCLClkEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.DeviceEvent");
    case BuiltinType::OCLQueue:
      return llvm::TargetExtType::get(Ctx, "spirv.Queue");
    case BuiltinType::OCLReserveID:
      return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
#define INTEL_SUBGROUP_AVC_TYPE(Name, Id)                                      \
  case BuiltinType::OCLIntelSubgroupAVC##Id:                                   \
    return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
#include "clang/Basic/OpenCLExtensionTypes.def"
    default:
      return nullptr;
    }
  }

  return nullptr;
}
static llvm::Type *getInlineSpirvConstant(CodeGenModule &CGM,
                                          llvm::Type *IntegralType,
                                          llvm::APInt Value) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Split the arbitrary-precision value into 32-bit words, least significant
  // word first.
  llvm::SmallVector<uint32_t> Words;
  while (Value.ugt(0)) {
    uint32_t Word = Value.trunc(32).getZExtValue();
    Value.lshrInPlace(32);
    Words.push_back(Word);
  }
  if (Words.size() == 0)
    Words.push_back(0);

  if (IntegralType)
    return llvm::TargetExtType::get(Ctx, "spirv.IntegralConstant",
                                    {IntegralType}, Words);
  return llvm::TargetExtType::get(Ctx, "spirv.Literal", {}, Words);
}
static llvm::Type *getInlineSpirvType(CodeGenModule &CGM,
                                      const HLSLInlineSpirvType *SpirvType) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  llvm::SmallVector<llvm::Type *> Operands;

  for (auto &Operand : SpirvType->getOperands()) {
    using SpirvOperandKind = SpirvOperand::SpirvOperandKind;

    llvm::Type *Result = nullptr;
    switch (Operand.getKind()) {
    case SpirvOperandKind::ConstantId: {
      llvm::Type *IntegralType =
          CGM.getTypes().ConvertType(Operand.getResultType());
      Result = getInlineSpirvConstant(CGM, IntegralType, Operand.getValue());
      break;
    }
    case SpirvOperandKind::Literal: {
      Result = getInlineSpirvConstant(CGM, nullptr, Operand.getValue());
      break;
    }
    case SpirvOperandKind::TypeId: {
      QualType TypeOperand = Operand.getResultType();
      if (const RecordDecl *RD = TypeOperand->getAsRecordDecl()) {
        assert(RD->isCompleteDefinition() &&
               "Type completion should have been required in Sema");
        // For resource records, use the handle's resource type instead.
        const FieldDecl *HandleField = RD->findFirstNamedDataMember();
        if (HandleField) {
          QualType ResourceType = HandleField->getType();
          if (ResourceType->getAs<HLSLAttributedResourceType>())
            TypeOperand = ResourceType;
        }
      }
      Result = CGM.getTypes().ConvertType(TypeOperand);
      break;
    }
    default:
      llvm_unreachable("HLSLInlineSpirvType had invalid operand!");
    }
    Operands.push_back(Result);
  }

  return llvm::TargetExtType::get(Ctx, "spirv.Type", Operands,
                                  {SpirvType->getOpcode(), SpirvType->getSize(),
                                   SpirvType->getAlignment()});
}
llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
    CodeGenModule &CGM, const Type *Ty,
    const CGHLSLOffsetInfo &OffsetInfo) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  if (auto *SpirvType = dyn_cast<HLSLInlineSpirvType>(Ty))
    return getInlineSpirvType(CGM, SpirvType);

  auto *ResType = dyn_cast<HLSLAttributedResourceType>(Ty);
  if (!ResType)
    return nullptr;

  const HLSLAttributedResourceType::Attributes &ResAttrs = ResType->getAttrs();
  switch (ResAttrs.ResourceClass) {
  case llvm::dxil::ResourceClass::UAV:
  case llvm::dxil::ResourceClass::SRV: {
    // Both typed and raw buffers need an element type.
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull())
      return nullptr;

    assert(!ResAttrs.IsROV &&
           "Rasterizer order views not implemented for SPIR-V yet");

    if (!ResAttrs.RawBuffer) {
      // Typed buffers become SPIR-V image types.
      return getSPIRVImageTypeFromHLSLResource(ResAttrs, ContainedTy, CGM);
    }

    if (ResAttrs.IsCounter) {
      // Counter buffers are writable i32 buffers in the StorageBuffer storage
      // class (12).
      llvm::Type *ElemType = llvm::Type::getInt32Ty(Ctx);
      return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {ElemType},
                                      {/*StorageClass=*/12,
                                       /*IsWritable=*/true});
    }

    llvm::Type *ElemType = CGM.getTypes().ConvertTypeForMem(ContainedTy);
    llvm::ArrayType *RuntimeArrayType = llvm::ArrayType::get(ElemType, 0);
    bool IsWritable = ResAttrs.ResourceClass == llvm::dxil::ResourceClass::UAV;
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer",
                                    {RuntimeArrayType},
                                    {/*StorageClass=*/12, IsWritable});
  }
  case llvm::dxil::ResourceClass::CBuffer: {
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull() || !ContainedTy->isStructureType())
      return nullptr;

    llvm::StructType *BufferLayoutTy =
        HLSLBufferLayoutBuilder(CGM).layOutStruct(
            ContainedTy->getAsCanonical<RecordType>(), OffsetInfo);
    // Constant buffers live in the Uniform storage class (2) and are
    // read-only.
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {BufferLayoutTy},
                                    {/*StorageClass=*/2, /*IsWritable=*/false});
  }
  case llvm::dxil::ResourceClass::Sampler:
    return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
  }
  return nullptr;
}
static unsigned
getImageFormat(const LangOptions &LangOpts,
               const HLSLAttributedResourceType::Attributes &attributes,
               llvm::Type *SampledType, QualType Ty, unsigned NumChannels) {
  // Only UAVs get an explicit format; everything else, and compilations with
  // -fspv-use-unknown-image-format, use ImageFormat::Unknown.
  if (LangOpts.HLSLSpvUseUnknownImageFormat ||
      attributes.ResourceClass != llvm::dxil::ResourceClass::UAV)
    return 0; // Unknown

  // The remainder selects the SPIR-V ImageFormat from the sampled type and
  // channel count (the specific format constants are elided in this excerpt):
  //   - 32-bit integers: the R32/Rg32/Rgba32 integer formats for 1, 2 or 4
  //     channels, signed vs. unsigned chosen from Ty->isSignedIntegerType();
  //   - 64-bit integers: the single-channel R64 integer formats;
  //   - 32-bit floats:   R32f, Rg32f or Rgba32f for 1, 2 or 4 channels.
  if (SampledType->isIntegerTy(32)) {
    // ...
  } else if (SampledType->isIntegerTy(64)) {
    // ...
  } else if (SampledType->isFloatTy()) {
    // ...
  }
  return 0; // Unknown
}
llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource(
    const HLSLAttributedResourceType::Attributes &attributes, QualType Ty,
    CodeGenModule &CGM) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  unsigned NumChannels = 1;
  // Vectors are described as their element type plus a channel count.
  if (const VectorType *V = dyn_cast<VectorType>(Ty)) {
    NumChannels = V->getNumElements();
    Ty = V->getElementType();
  }
  assert(!Ty->isVectorType() && "We still have a vector type.");

  llvm::Type *SampledType = CGM.getTypes().ConvertTypeForMem(Ty);
  assert((SampledType->isIntegerTy() || SampledType->isFloatingPointTy()) &&
         "The element type for a SPIR-V resource must be a scalar integer or "
         "floating point type.");

  // The integer parameters correspond to the OpTypeImage operands
  // (Dim, Depth, Arrayed, MS, Sampled, Format).
  SmallVector<unsigned, 6> IntParams(6, 0);
  // ... (the remaining operands, including the Format chosen by
  //      getImageFormat above, and the extension type name 'Name', e.g.
  //      "spirv.Image", are set up here; elided in this excerpt)
  // Sampled: 1 for a sampled (SRV) image, 2 for a storage (UAV) image.
  IntParams[4] =
      attributes.ResourceClass == llvm::dxil::ResourceClass::UAV ? 2 : 1;

  llvm::TargetExtType *ImageType =
      llvm::TargetExtType::get(Ctx, Name, {SampledType}, IntParams);
  return ImageType;
}
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<CommonSPIRTargetCodeGenInfo>(CGM.getTypes());
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createSPIRVTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<SPIRVTargetCodeGenInfo>(CGM.getTypes());
}
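
// Usage note (not from the original file): CodeGenModule selects one of these
// factories from the target triple when it builds its TargetCodeGenInfo;
// spir/spir64 triples go through createCommonSPIRTargetCodeGenInfo, while
// spirv, spirv32 and spirv64 triples use createSPIRVTargetCodeGenInfo.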