clang 22.0.0git
SPIR.cpp
Go to the documentation of this file.
1//===- SPIR.cpp -----------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
#include "ABIInfoImpl.h"
#include "HLSLBufferLayoutBuilder.h"
#include "TargetInfo.h"
12
13using namespace clang;
14using namespace clang::CodeGen;
15
16//===----------------------------------------------------------------------===//
17// Base ABI and target codegen info implementation common between SPIR and
18// SPIR-V.
19//===----------------------------------------------------------------------===//
20
21namespace {
// ABI details shared by SPIR and SPIR-V. Inherits the default classification
// rules and only overrides the runtime calling convention (see setCCs()).
class CommonSPIRABIInfo : public DefaultABIInfo {
public:
  CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }

private:
  // Switch the runtime calling convention to SPIR_FUNC (defined below).
  void setCCs();
};
29
// SPIR-V-specific ABI details. Adds kernel-argument handling and, for the
// AMD vendor triple, custom aggregate classification on top of the common
// SPIR rules.
class SPIRVABIInfo : public CommonSPIRABIInfo {
public:
  SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
  void computeInfo(CGFunctionInfo &FI) const override;

private:
  // Return-value classification (AMD vendor gets direct aggregates).
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  // Classification used only for SPIR_KERNEL entry-point arguments.
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  // Classification for ordinary (non-kernel) arguments.
  ABIArgInfo classifyArgumentType(QualType Ty) const;
};
40} // end anonymous namespace
41namespace {
42class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
43public:
44 CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
45 : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
46 CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
47 : TargetCodeGenInfo(std::move(ABIInfo)) {}
48
49 LangAS getASTAllocaAddressSpace() const override {
51 getABIInfo().getDataLayout().getAllocaAddrSpace());
52 }
53
54 unsigned getDeviceKernelCallingConv() const override;
55 llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override;
56 llvm::Type *getHLSLType(CodeGenModule &CGM, const Type *Ty,
57 const CGHLSLOffsetInfo &OffsetInfo) const override;
58
59 llvm::Type *getHLSLPadding(CodeGenModule &CGM,
60 CharUnits NumBytes) const override {
61 unsigned Size = NumBytes.getQuantity();
62 return llvm::TargetExtType::get(CGM.getLLVMContext(), "spirv.Padding", {},
63 {Size});
64 }
65
66 bool isHLSLPadding(llvm::Type *Ty) const override {
67 if (auto *TET = dyn_cast<llvm::TargetExtType>(Ty))
68 return TET->getName() == "spirv.Padding";
69 return false;
70 }
71
72 llvm::Type *getSPIRVImageTypeFromHLSLResource(
73 const HLSLAttributedResourceType::Attributes &attributes,
74 QualType SampledType, CodeGenModule &CGM) const;
75 void
76 setOCLKernelStubCallingConvention(const FunctionType *&FT) const override;
77 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
78 llvm::PointerType *T,
79 QualType QT) const override;
80 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
81 CodeGen::CodeGenModule &M) const override;
82};
// SPIR-V target codegen info: layers CUDA/HIP kernel handling, global-AS
// selection, sync-scope mapping, and AMD-vendor specifics on the common base.
class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
public:
  SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  // AMD-vendored SPIR-V has no device libc available, so library calls are
  // unsupported there.
  bool supportsLibCall() const override {
    return getABIInfo().getTarget().getTriple().getVendor() !=
           llvm::Triple::AMD;
  }
};
101
102inline StringRef mapClangSyncScopeToLLVM(SyncScope Scope) {
103 switch (Scope) {
106 return "singlethread";
110 return "subgroup";
116 return "workgroup";
120 return "device";
124 return "";
125 }
126 return "";
127}
128} // End anonymous namespace.
129
// Runtime helper calls on SPIR targets use the SPIR_FUNC calling convention
// rather than plain C.
void CommonSPIRABIInfo::setCCs() {
  // DefaultABIInfo is expected to have left this at the C default.
  assert(getRuntimeCC() == llvm::CallingConv::C);
  RuntimeCC = llvm::CallingConv::SPIR_FUNC;
}
134
135ABIArgInfo SPIRVABIInfo::classifyReturnType(QualType RetTy) const {
136 if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
138 if (!isAggregateTypeForABI(RetTy) || getRecordArgABI(RetTy, getCXXABI()))
140
141 if (const auto *RD = RetTy->getAsRecordDecl();
142 RD && RD->hasFlexibleArrayMember())
144
145 // TODO: The AMDGPU ABI is non-trivial to represent in SPIR-V; in order to
146 // avoid encoding various architecture specific bits here we return everything
147 // as direct to retain type info for things like aggregates, for later perusal
148 // when translating back to LLVM/lowering in the BE. This is also why we
149 // disable flattening as the outcomes can mismatch between SPIR-V and AMDGPU.
150 // This will be revisited / optimised in the future.
151 return ABIArgInfo::getDirect(CGT.ConvertType(RetTy), 0u, nullptr, false);
152}
153
// Classify a SPIR_KERNEL entry-point argument. Kernel arguments are more
// constrained than ordinary arguments: default-AS pointers are remapped and
// aggregates need device-visible copies. Non-device compilations fall through
// to the regular argument classification.
ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
  if (getContext().getLangOpts().isTargetDevice()) {
    // Coerce pointer arguments with default address space to CrossWorkGroup
    // pointers for target devices as default address space kernel arguments
    // are not allowed. We use the opencl_global language address space which
    // always maps to CrossWorkGroup.
    llvm::Type *LTy = CGT.ConvertType(Ty);
    auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
    auto GlobalAS = getContext().getTargetAddressSpace(LangAS::opencl_global);
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
    if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
      // Rebuild the pointer type in the global AS; passed direct without
      // flattening so the pointer survives as-is.
      LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
      return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
    }

    if (isAggregateTypeForABI(Ty)) {
      if (getTarget().getTriple().getVendor() == llvm::Triple::AMD)
        // TODO: The AMDGPU kernel ABI passes aggregates byref, which is not
        // currently expressible in SPIR-V; SPIR-V passes aggregates byval,
        // which the AMDGPU kernel ABI does not allow. Passing aggregates as
        // direct works around this impedance mismatch, as it retains type info
        // and can be correctly handled, post reverse-translation, by the AMDGPU
        // BE, which has to support this CC for legacy OpenCL purposes. It can
        // be brittle and does lead to performance degradation in certain
        // pathological cases. This will be revisited / optimised in the future,
        // once a way to deal with the byref/byval impedance mismatch is
        // identified.
        return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
      // Force copying aggregate type in kernel arguments by value when
      // compiling CUDA targeting SPIR-V. This is required for the object
      // copied to be valid on the device.
      // This behavior follows the CUDA spec
      // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing,
      // and matches the NVPTX implementation. TODO: hardcoding to 0 should be
      // revisited if HIPSPV / byval starts making use of the AS of an indirect
      // arg.
      return getNaturalAlignIndirect(Ty, /*AddrSpace=*/0, /*byval=*/true);
    }
  }
  // Host compilations (and non-pointer, non-aggregate args) use the ordinary
  // argument rules.
  return classifyArgumentType(Ty);
}
195
196ABIArgInfo SPIRVABIInfo::classifyArgumentType(QualType Ty) const {
197 if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
199 if (!isAggregateTypeForABI(Ty))
201
202 // Records with non-trivial destructors/copy-constructors should not be
203 // passed by value.
204 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
205 return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
207
208 if (const auto *RD = Ty->getAsRecordDecl();
209 RD && RD->hasFlexibleArrayMember())
211
212 return ABIArgInfo::getDirect(CGT.ConvertType(Ty), 0u, nullptr, false);
213}
214
215void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
216 // The logic is same as in DefaultABIInfo with an exception on the kernel
217 // arguments handling.
218 llvm::CallingConv::ID CC = FI.getCallingConvention();
219
220 if (!getCXXABI().classifyReturnType(FI))
222
223 for (auto &I : FI.arguments()) {
224 if (CC == llvm::CallingConv::SPIR_KERNEL) {
225 I.info = classifyKernelArgumentType(I.type);
226 } else {
227 I.info = classifyArgumentType(I.type);
228 }
229 }
230}
231
232namespace clang {
233namespace CodeGen {
235 if (CGM.getTarget().getTriple().isSPIRV())
236 SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
237 else
238 CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
239}
240}
241}
242
// Device kernels on SPIR(-V) always use the SPIR_KERNEL calling convention.
unsigned CommonSPIRTargetCodeGenInfo::getDeviceKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}
246
247void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
248 const FunctionType *&FT) const {
249 // Convert HIP kernels to SPIR-V kernels.
250 if (getABIInfo().getContext().getLangOpts().HIP) {
251 FT = getABIInfo().getContext().adjustFunctionType(
253 return;
254 }
255}
256
257void CommonSPIRTargetCodeGenInfo::setOCLKernelStubCallingConvention(
258 const FunctionType *&FT) const {
259 FT = getABIInfo().getContext().adjustFunctionType(
261}
262
263// LLVM currently assumes a null pointer has the bit pattern 0, but some GPU
264// targets use a non-zero encoding for null in certain address spaces.
265// Because SPIR(-V) is a generic target and the bit pattern of null in
266// non-generic AS is unspecified, materialize null in non-generic AS via an
267// addrspacecast from null in generic AS. This allows later lowering to
268// substitute the target's real sentinel value.
269llvm::Constant *
270CommonSPIRTargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
271 llvm::PointerType *PT,
272 QualType QT) const {
274 ? LangAS::Default
276 unsigned ASAsInt = static_cast<unsigned>(AS);
277 unsigned FirstTargetASAsInt =
278 static_cast<unsigned>(LangAS::FirstTargetAddressSpace);
279 unsigned CodeSectionINTELAS = FirstTargetASAsInt + 9;
280 // As per SPV_INTEL_function_pointers, it is illegal to addrspacecast
281 // function pointers to/from the generic AS.
282 bool IsFunctionPtrAS =
283 CGM.getTriple().isSPIRV() && ASAsInt == CodeSectionINTELAS;
284 if (AS == LangAS::Default || AS == LangAS::opencl_generic ||
285 AS == LangAS::opencl_constant || IsFunctionPtrAS)
286 return llvm::ConstantPointerNull::get(PT);
287
288 auto &Ctx = CGM.getContext();
289 auto NPT = llvm::PointerType::get(
290 PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
291 return llvm::ConstantExpr::getAddrSpaceCast(
292 llvm::ConstantPointerNull::get(NPT), PT);
293}
294
295void CommonSPIRTargetCodeGenInfo::setTargetAttributes(
296 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
297 if (M.getLangOpts().OpenCL || GV->isDeclaration())
298 return;
299
300 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
301 if (!FD)
302 return;
303
304 llvm::Function *F = dyn_cast<llvm::Function>(GV);
305 assert(F && "Expected GlobalValue to be a Function");
306
307 if (FD->hasAttr<DeviceKernelAttr>())
308 F->setCallingConv(getDeviceKernelCallingConv());
309}
310
311LangAS
312SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
313 const VarDecl *D) const {
314 assert(!CGM.getLangOpts().OpenCL &&
315 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
316 "Address space agnostic languages only");
317 // If we're here it means that we're using the SPIRDefIsGen ASMap, hence for
318 // the global AS we can rely on either cuda_device or sycl_global to be
319 // correct; however, since this is not a CUDA Device context, we use
320 // sycl_global to prevent confusion with the assertion.
321 LangAS DefaultGlobalAS = getLangASFromTargetAS(
322 CGM.getContext().getTargetAddressSpace(LangAS::sycl_global));
323 if (!D)
324 return DefaultGlobalAS;
325
326 LangAS AddrSpace = D->getType().getAddressSpace();
327 if (AddrSpace != LangAS::Default)
328 return AddrSpace;
329
330 return DefaultGlobalAS;
331}
332
// Attach SPIR-V attributes to an emitted definition: set the kernel calling
// convention, and for AMD-vendored HIP kernels encode the flat work-group
// size as max_work_group_size metadata.
void SPIRVTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  assert(F && "Expected GlobalValue to be a Function");

  if (FD->hasAttr<DeviceKernelAttr>())
    F->setCallingConv(getDeviceKernelCallingConv());

  // The remainder only applies to HIP kernels on the AMD vendor triple.
  if (!M.getLangOpts().HIP ||
      M.getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return;

  if (!FD->hasAttr<CUDAGlobalAttr>())
    return;

  // Start from the language-level default, overridden by an explicit
  // amdgpu_flat_work_group_size attribute if present.
  unsigned N = M.getLangOpts().GPUMaxThreadsPerBlock;
  if (auto FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>())
    N = FlatWGS->getMax()->EvaluateKnownConstInt(M.getContext()).getExtValue();

  // We encode the maximum flat WG size in the first component of the 3D
  // max_work_group_size attribute, which will get reverse translated into the
  // original AMDGPU attribute when targeting AMDGPU.
  auto Int32Ty = llvm::IntegerType::getInt32Ty(M.getLLVMContext());
  llvm::Metadata *AttrMDArgs[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, N)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1))};

  F->setMetadata("max_work_group_size",
                 llvm::MDNode::get(M.getLLVMContext(), AttrMDArgs));
}
371
// Translate a Clang sync scope into an LLVM sync-scope ID by name; the
// ordering operand does not affect the scope mapping on SPIR-V.
llvm::SyncScope::ID
SPIRVTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &, SyncScope Scope,
                                           llvm::AtomicOrdering,
                                           llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(mapClangSyncScopeToLLVM(Scope));
}
378
379/// Construct a SPIR-V target extension type for the given OpenCL image type.
380static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType,
381 StringRef OpenCLName,
382 unsigned AccessQualifier) {
383 // These parameters compare to the operands of OpTypeImage (see
384 // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage
385 // for more details). The first 6 integer parameters all default to 0, and
386 // will be changed to 1 only for the image type(s) that set the parameter to
387 // one. The 7th integer parameter is the access qualifier, which is tacked on
388 // at the end.
389 SmallVector<unsigned, 7> IntParams = {0, 0, 0, 0, 0, 0};
390
391 // Choose the dimension of the image--this corresponds to the Dim enum in
392 // SPIR-V (first integer parameter of OpTypeImage).
393 if (OpenCLName.starts_with("image2d"))
394 IntParams[0] = 1;
395 else if (OpenCLName.starts_with("image3d"))
396 IntParams[0] = 2;
397 else if (OpenCLName == "image1d_buffer")
398 IntParams[0] = 5; // Buffer
399 else
400 assert(OpenCLName.starts_with("image1d") && "Unknown image type");
401
402 // Set the other integer parameters of OpTypeImage if necessary. Note that the
403 // OpenCL image types don't provide any information for the Sampled or
404 // Image Format parameters.
405 if (OpenCLName.contains("_depth"))
406 IntParams[1] = 1;
407 if (OpenCLName.contains("_array"))
408 IntParams[2] = 1;
409 if (OpenCLName.contains("_msaa"))
410 IntParams[3] = 1;
411
412 // Access qualifier
413 IntParams.push_back(AccessQualifier);
414
415 return llvm::TargetExtType::get(Ctx, BaseType, {llvm::Type::getVoidTy(Ctx)},
416 IntParams);
417}
418
// Map an OpenCL builtin type to its SPIR-V target extension type, or return
// nullptr if the type has no special SPIR-V representation.
llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(CodeGenModule &CGM,
                                                       const Type *Ty) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  // Pipes carry a single "not read-only" flag operand.
  if (auto *PipeTy = dyn_cast<PipeType>(Ty))
    return llvm::TargetExtType::get(Ctx, "spirv.Pipe", {},
                                    {!PipeTy->isReadOnly()});
  if (auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
    // Numeric values of the SPIR-V AccessQualifier enum, selected via the
    // Suffix token of each IMAGE_TYPE entry below.
    enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
    switch (BuiltinTy->getKind()) {
// Expand one case per OpenCL image type from the .def file.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
#include "clang/Basic/OpenCLImageTypes.def"
    case BuiltinType::OCLSampler:
      return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
    case BuiltinType::OCLEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.Event");
    case BuiltinType::OCLClkEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.DeviceEvent");
    case BuiltinType::OCLQueue:
      return llvm::TargetExtType::get(Ctx, "spirv.Queue");
    case BuiltinType::OCLReserveID:
      return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
// Intel subgroup AVC types from the extensions .def file.
#define INTEL_SUBGROUP_AVC_TYPE(Name, Id) \
  case BuiltinType::OCLIntelSubgroupAVC##Id: \
    return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
#include "clang/Basic/OpenCLExtensionTypes.def"
    default:
      return nullptr;
    }
  }

  return nullptr;
}
453
454// Gets a spirv.IntegralConstant or spirv.Literal. If IntegralType is present,
455// returns an IntegralConstant, otherwise returns a Literal.
456static llvm::Type *getInlineSpirvConstant(CodeGenModule &CGM,
457 llvm::Type *IntegralType,
458 llvm::APInt Value) {
459 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
460
461 // Convert the APInt value to an array of uint32_t words
463
464 while (Value.ugt(0)) {
465 uint32_t Word = Value.trunc(32).getZExtValue();
466 Value.lshrInPlace(32);
467
468 Words.push_back(Word);
469 }
470 if (Words.size() == 0)
471 Words.push_back(0);
472
473 if (IntegralType)
474 return llvm::TargetExtType::get(Ctx, "spirv.IntegralConstant",
475 {IntegralType}, Words);
476 return llvm::TargetExtType::get(Ctx, "spirv.Literal", {}, Words);
477}
478
479static llvm::Type *getInlineSpirvType(CodeGenModule &CGM,
480 const HLSLInlineSpirvType *SpirvType) {
481 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
482
484
485 for (auto &Operand : SpirvType->getOperands()) {
486 using SpirvOperandKind = SpirvOperand::SpirvOperandKind;
487
488 llvm::Type *Result = nullptr;
489 switch (Operand.getKind()) {
490 case SpirvOperandKind::ConstantId: {
491 llvm::Type *IntegralType =
492 CGM.getTypes().ConvertType(Operand.getResultType());
493
494 Result = getInlineSpirvConstant(CGM, IntegralType, Operand.getValue());
495 break;
496 }
497 case SpirvOperandKind::Literal: {
498 Result = getInlineSpirvConstant(CGM, nullptr, Operand.getValue());
499 break;
500 }
501 case SpirvOperandKind::TypeId: {
502 QualType TypeOperand = Operand.getResultType();
503 if (const auto *RD = TypeOperand->getAsRecordDecl()) {
504 assert(RD->isCompleteDefinition() &&
505 "Type completion should have been required in Sema");
506
507 const FieldDecl *HandleField = RD->findFirstNamedDataMember();
508 if (HandleField) {
509 QualType ResourceType = HandleField->getType();
510 if (ResourceType->getAs<HLSLAttributedResourceType>()) {
511 TypeOperand = ResourceType;
512 }
513 }
514 }
515 Result = CGM.getTypes().ConvertType(TypeOperand);
516 break;
517 }
518 default:
519 llvm_unreachable("HLSLInlineSpirvType had invalid operand!");
520 break;
521 }
522
523 assert(Result);
524 Operands.push_back(Result);
525 }
526
527 return llvm::TargetExtType::get(Ctx, "spirv.Type", Operands,
528 {SpirvType->getOpcode(), SpirvType->getSize(),
529 SpirvType->getAlignment()});
530}
531
// Map an HLSL type (inline SPIR-V type or attributed resource) to its SPIR-V
// target extension type; returns nullptr when the type needs no special
// lowering.
llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
    CodeGenModule &CGM, const Type *Ty,
    const CGHLSLOffsetInfo &OffsetInfo) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  if (auto *SpirvType = dyn_cast<HLSLInlineSpirvType>(Ty))
    return getInlineSpirvType(CGM, SpirvType);

  auto *ResType = dyn_cast<HLSLAttributedResourceType>(Ty);
  if (!ResType)
    return nullptr;

  const HLSLAttributedResourceType::Attributes &ResAttrs = ResType->getAttrs();
  switch (ResAttrs.ResourceClass) {
  case llvm::dxil::ResourceClass::UAV:
  case llvm::dxil::ResourceClass::SRV: {
    // TypedBuffer and RawBuffer both need element type
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull())
      return nullptr;

    assert(!ResAttrs.IsROV &&
           "Rasterizer order views not implemented for SPIR-V yet");

    if (!ResAttrs.RawBuffer) {
      // convert element type
      return getSPIRVImageTypeFromHLSLResource(ResAttrs, ContainedTy, CGM);
    }

    // Counter buffers are a single writable i32 in StorageBuffer.
    if (ResAttrs.IsCounter) {
      llvm::Type *ElemType = llvm::Type::getInt32Ty(Ctx);
      uint32_t StorageClass = /* StorageBuffer storage class */ 12;
      return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {ElemType},
                                      {StorageClass, true});
    }
    // Raw buffers become a runtime array of the element type; only UAVs are
    // writable.
    llvm::Type *ElemType = CGM.getTypes().ConvertTypeForMem(ContainedTy);
    llvm::ArrayType *RuntimeArrayType = llvm::ArrayType::get(ElemType, 0);
    uint32_t StorageClass = /* StorageBuffer storage class */ 12;
    bool IsWritable = ResAttrs.ResourceClass == llvm::dxil::ResourceClass::UAV;
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer",
                                    {RuntimeArrayType},
                                    {StorageClass, IsWritable});
  }
  case llvm::dxil::ResourceClass::CBuffer: {
    // Constant buffers carry a laid-out struct in the Uniform storage class.
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull() || !ContainedTy->isStructureType())
      return nullptr;

    llvm::StructType *BufferLayoutTy =
        HLSLBufferLayoutBuilder(CGM).layOutStruct(
            ContainedTy->getAsCanonical<RecordType>(), OffsetInfo);
    uint32_t StorageClass = /* Uniform storage class */ 2;
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {BufferLayoutTy},
                                    {StorageClass, false});
    break;
  }
  case llvm::dxil::ResourceClass::Sampler:
    return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
  }
  return nullptr;
}
593
594static unsigned
596 const HLSLAttributedResourceType::Attributes &attributes,
597 llvm::Type *SampledType, QualType Ty, unsigned NumChannels) {
598 // For images with `Sampled` operand equal to 2, there are restrictions on
599 // using the Unknown image format. To avoid these restrictions in common
600 // cases, we guess an image format for them based on the sampled type and the
601 // number of channels. This is intended to match the behaviour of DXC.
602 if (LangOpts.HLSLSpvUseUnknownImageFormat ||
603 attributes.ResourceClass != llvm::dxil::ResourceClass::UAV) {
604 return 0; // Unknown
605 }
606
607 if (SampledType->isIntegerTy(32)) {
608 if (Ty->isSignedIntegerType()) {
609 if (NumChannels == 1)
610 return 24; // R32i
611 if (NumChannels == 2)
612 return 25; // Rg32i
613 if (NumChannels == 4)
614 return 21; // Rgba32i
615 } else {
616 if (NumChannels == 1)
617 return 33; // R32ui
618 if (NumChannels == 2)
619 return 35; // Rg32ui
620 if (NumChannels == 4)
621 return 30; // Rgba32ui
622 }
623 } else if (SampledType->isIntegerTy(64)) {
624 if (NumChannels == 1) {
625 if (Ty->isSignedIntegerType()) {
626 return 41; // R64i
627 }
628 return 40; // R64ui
629 }
630 } else if (SampledType->isFloatTy()) {
631 if (NumChannels == 1)
632 return 3; // R32f
633 if (NumChannels == 2)
634 return 6; // Rg32f
635 if (NumChannels == 4)
636 return 1; // Rgba32f
637 }
638
639 return 0; // Unknown
640}
641
642llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource(
643 const HLSLAttributedResourceType::Attributes &attributes, QualType Ty,
644 CodeGenModule &CGM) const {
645 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
646
647 unsigned NumChannels = 1;
649 if (const VectorType *V = dyn_cast<VectorType>(Ty)) {
650 NumChannels = V->getNumElements();
651 Ty = V->getElementType();
652 }
653 assert(!Ty->isVectorType() && "We still have a vector type.");
654
655 llvm::Type *SampledType = CGM.getTypes().ConvertTypeForMem(Ty);
656
657 assert((SampledType->isIntegerTy() || SampledType->isFloatingPointTy()) &&
658 "The element type for a SPIR-V resource must be a scalar integer or "
659 "floating point type.");
660
661 // These parameters correspond to the operands to the OpTypeImage SPIR-V
662 // instruction. See
663 // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage.
664 SmallVector<unsigned, 6> IntParams(6, 0);
665
666 const char *Name =
667 Ty->isSignedIntegerType() ? "spirv.SignedImage" : "spirv.Image";
668
669 // Dim
670 // For now we assume everything is a buffer.
671 IntParams[0] = 5;
672
673 // Depth
674 // HLSL does not indicate if it is a depth texture or not, so we use unknown.
675 IntParams[1] = 2;
676
677 // Arrayed
678 IntParams[2] = 0;
679
680 // MS
681 IntParams[3] = 0;
682
683 // Sampled
684 IntParams[4] =
685 attributes.ResourceClass == llvm::dxil::ResourceClass::UAV ? 2 : 1;
686
687 // Image format.
688 IntParams[5] = getImageFormat(CGM.getLangOpts(), attributes, SampledType, Ty,
689 NumChannels);
690
691 llvm::TargetExtType *ImageType =
692 llvm::TargetExtType::get(Ctx, Name, {SampledType}, IntParams);
693 return ImageType;
694}
695
696std::unique_ptr<TargetCodeGenInfo>
698 return std::make_unique<CommonSPIRTargetCodeGenInfo>(CGM.getTypes());
699}
700
701std::unique_ptr<TargetCodeGenInfo>
703 return std::make_unique<SPIRVTargetCodeGenInfo>(CGM.getTypes());
704}
#define V(N, I)
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
Definition CGCall.cpp:360
static llvm::Type * getInlineSpirvType(CodeGenModule &CGM, const HLSLInlineSpirvType *SpirvType)
Definition SPIR.cpp:479
static llvm::Type * getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType, StringRef OpenCLName, unsigned AccessQualifier)
Construct a SPIR-V target extension type for the given OpenCL image type.
Definition SPIR.cpp:380
static unsigned getImageFormat(const LangOptions &LangOpts, const HLSLAttributedResourceType::Attributes &attributes, llvm::Type *SampledType, QualType Ty, unsigned NumChannels)
Definition SPIR.cpp:595
static llvm::Type * getInlineSpirvConstant(CodeGenModule &CGM, llvm::Type *IntegralType, llvm::APInt Value)
Definition SPIR.cpp:456
unsigned getTargetAddressSpace(LangAS AS) const
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
Definition CGCXXABI.h:158
CGFunctionInfo - Class to encapsulate the information about a function definition.
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
CanQualType getReturnType() const
MutableArrayRef< ArgInfo > arguments()
This class organizes the cross-function state that is used while generating LLVM code.
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::Triple & getTriple() const
ASTContext & getContext() const
llvm::LLVMContext & getLLVMContext()
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
DefaultABIInfo - The default implementation for ABI specific details.
Definition ABIInfoImpl.h:21
ABIArgInfo classifyArgumentType(QualType RetTy) const
ABIArgInfo classifyReturnType(QualType RetTy) const
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
Definition TargetInfo.h:49
T * getAttr() const
Definition DeclBase.h:573
bool hasAttr() const
Definition DeclBase.h:577
Represents a member of a struct/union/class.
Definition Decl.h:3160
ExtInfo withCallingConv(CallingConv cc) const
Definition TypeBase.h:4673
ExtInfo getExtInfo() const
Definition TypeBase.h:4806
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8404
bool hasFlexibleArrayMember() const
Definition Decl.h:4345
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isStructureType() const
Definition Type.cpp:678
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2205
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
CanQualType getCanonicalTypeUnqualified() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isVectorType() const
Definition TypeBase.h:8654
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
Definition TypeBase.h:2921
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition Type.cpp:653
bool isNullPtrType() const
Definition TypeBase.h:8908
QualType getType() const
Definition Decl.h:723
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:145
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Definition SPIR.cpp:234
bool isAggregateTypeForABI(QualType T)
std::unique_ptr< TargetCodeGenInfo > createSPIRVTargetCodeGenInfo(CodeGenModule &CGM)
Definition SPIR.cpp:702
std::unique_ptr< TargetCodeGenInfo > createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM)
Definition SPIR.cpp:697
The JSON file list parser is used to communicate input to InstallAPI.
StorageClass
Storage classes.
Definition Specifiers.h:248
const FunctionProtoType * T
@ Type
The name was classified as a type.
Definition Sema.h:562
LangAS
Defines the address space values used by the address space qualifier of QualType.
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
@ CC_DeviceKernel
Definition Specifiers.h:292
@ CC_SpirFunction
Definition Specifiers.h:291
LangAS getLangASFromTargetAS(unsigned TargetAS)
unsigned int uint32_t