clang 20.0.0git
AMDGPU.cpp
1//===- AMDGPU.cpp ---------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ABIInfoImpl.h"
10#include "TargetInfo.h"
11#include "clang/Basic/TargetOptions.h"
12
13using namespace clang;
14using namespace clang::CodeGen;
15
16//===----------------------------------------------------------------------===//
17// AMDGPU ABI Implementation
18//===----------------------------------------------------------------------===//
19
20namespace {
21
22class AMDGPUABIInfo final : public DefaultABIInfo {
23private:
24 static const unsigned MaxNumRegsForArgsRet = 16;
25
26 unsigned numRegsForType(QualType Ty) const;
27
28 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
29 bool isHomogeneousAggregateSmallEnough(const Type *Base,
30 uint64_t Members) const override;
31
32 // Coerce HIP scalar pointer arguments from generic pointers to global ones.
33 llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
34 unsigned ToAS) const {
35 // Single value types.
36 auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
37 if (PtrTy && PtrTy->getAddressSpace() == FromAS)
38 return llvm::PointerType::get(Ty->getContext(), ToAS);
39 return Ty;
40 }
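 // Illustrative example (hypothetical HIP kernel; assumes the usual AMDGPU
 // numbering of generic = addrspace(0) and global = addrspace(1)): for
 //   __global__ void copy(float *dst, const float *src);
 // the generic pointer parameters are rewritten by this hook, so the emitted
 // kernel roughly becomes
 //   define amdgpu_kernel void @copy(ptr addrspace(1) %dst, ptr addrspace(1) %src)
 // Non-pointer parameters, and pointers already in a non-generic address
 // space, are returned unchanged.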
41
42public:
43 explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
44 DefaultABIInfo(CGT) {}
45
46 ABIArgInfo classifyReturnType(QualType RetTy) const;
47 ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
48 ABIArgInfo classifyArgumentType(QualType Ty, bool Variadic,
49 unsigned &NumRegsLeft) const;
50
51 void computeInfo(CGFunctionInfo &FI) const override;
52 RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
53 AggValueSlot Slot) const override;
54};
55
56bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
57 return true;
58}
59
60bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
61 const Type *Base, uint64_t Members) const {
62 uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;
63
64 // Homogeneous Aggregates may occupy at most 16 registers.
65 return Members * NumRegs <= MaxNumRegsForArgsRet;
66}
67
68/// Estimate number of registers the type will use when passed in registers.
69unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
70 unsigned NumRegs = 0;
71
72 if (const VectorType *VT = Ty->getAs<VectorType>()) {
73 // Compute from the number of elements. The reported size is based on the
74 // in-memory size, which includes the padding 4th element for 3-vectors.
75 QualType EltTy = VT->getElementType();
76 unsigned EltSize = getContext().getTypeSize(EltTy);
77
78 // 16-bit element vectors should be passed as packed.
79 if (EltSize == 16)
80 return (VT->getNumElements() + 1) / 2;
81
82 unsigned EltNumRegs = (EltSize + 31) / 32;
83 return EltNumRegs * VT->getNumElements();
84 }
85
86 if (const RecordType *RT = Ty->getAs<RecordType>()) {
87 const RecordDecl *RD = RT->getDecl();
88 assert(!RD->hasFlexibleArrayMember());
89
90 for (const FieldDecl *Field : RD->fields()) {
91 QualType FieldTy = Field->getType();
92 NumRegs += numRegsForType(FieldTy);
93 }
94
95 return NumRegs;
96 }
97
98 return (getContext().getTypeSize(Ty) + 31) / 32;
99}
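// Worked example (illustrative): with the rules above, a <6 x half> vector is
// packed two elements per register and needs (6 + 1) / 2 = 3 registers, a
// double needs (64 + 31) / 32 = 2, and a struct holding a float4 and a double
// sums its fields to 4 + 2 = 6 registers.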
100
101void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
102 llvm::CallingConv::ID CC = FI.getCallingConvention();
103
104 if (!getCXXABI().classifyReturnType(FI))
105 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
106
107 unsigned ArgumentIndex = 0;
108 const unsigned numFixedArguments = FI.getNumRequiredArgs();
109
110 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
111 for (auto &Arg : FI.arguments()) {
112 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
113 Arg.info = classifyKernelArgumentType(Arg.type);
114 } else {
115 bool FixedArgument = ArgumentIndex++ < numFixedArguments;
116 Arg.info = classifyArgumentType(Arg.type, !FixedArgument, NumRegsLeft);
117 }
118 }
119}
120
121RValue AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
122 QualType Ty, AggValueSlot Slot) const {
123 const bool IsIndirect = false;
124 const bool AllowHigherAlign = false;
125 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
126 getContext().getTypeInfoInChars(Ty),
127 CharUnits::fromQuantity(4), AllowHigherAlign, Slot);
128}
129
130ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
131 if (isAggregateTypeForABI(RetTy)) {
132 // Records with non-trivial destructors/copy-constructors should not be
133 // returned by value.
134 if (!getRecordArgABI(RetTy, getCXXABI())) {
135 // Ignore empty structs/unions.
136 if (isEmptyRecord(getContext(), RetTy, true))
137 return ABIArgInfo::getIgnore();
138
139 // Lower single-element structs to just return a regular value.
140 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
141 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
142
143 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
144 const RecordDecl *RD = RT->getDecl();
145 if (RD->hasFlexibleArrayMember())
146 return DefaultABIInfo::classifyReturnType(RetTy);
147 }
148
149 // Pack aggregates <= 8 bytes into single VGPR or pair.
150 uint64_t Size = getContext().getTypeSize(RetTy);
151 if (Size <= 16)
152 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
153
154 if (Size <= 32)
155 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
156
157 if (Size <= 64) {
158 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
159 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
160 }
161
162 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
163 return ABIArgInfo::getDirect();
164 }
165 }
166
167 // Otherwise just do the default thing.
168 return DefaultABIInfo::classifyReturnType(RetTy);
169}
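// Illustrative results of the rules above (hypothetical C structs): a
// struct { char c[3]; } (24 bits) is returned directly as i32, a
// struct { int a; int b; } (64 bits) as [2 x i32], and a struct of twenty
// ints (20 registers > MaxNumRegsForArgsRet) falls through to the default
// path and is returned indirectly via an sret pointer.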
170
171/// For kernels all parameters are really passed in a special buffer. It doesn't
172/// make sense to pass anything byval, so everything must be direct.
173ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
174 Ty = useFirstFieldIfTransparentUnion(Ty);
175
176 // TODO: Can we omit empty structs?
177
178 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
179 Ty = QualType(SeltTy, 0);
180
181 llvm::Type *OrigLTy = CGT.ConvertType(Ty);
182 llvm::Type *LTy = OrigLTy;
183 if (getContext().getLangOpts().HIP) {
184 LTy = coerceKernelArgumentType(
185 OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
186 /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
187 }
188
189 // FIXME: Should also use this for OpenCL, but it requires addressing the
190 // problem of kernels being called.
191 //
192 // FIXME: This doesn't apply the optimization of coercing pointers in structs
193 // to global address space when using byref. This would require implementing a
194 // new kind of coercion of the in-memory type when used for indirect arguments.
195 if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
196 isAggregateTypeForABI(Ty)) {
197 return ABIArgInfo::getIndirectAliased(
198 getContext().getTypeAlignInChars(Ty),
199 getContext().getTargetAddressSpace(LangAS::opencl_constant),
200 false /*Realign*/, nullptr /*Padding*/);
201 }
202
203 // If we set CanBeFlattened to true, CodeGen will expand the struct to its
204 // individual elements, which confuses the Clover OpenCL backend; therefore we
205 // have to set it to false here. Other args of getDirect() are just defaults.
206 return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
207}
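// Illustrative example (hypothetical HIP kernel parameter): for a kernel
// taking struct Params { float *In; int N; } by value, the struct is an
// aggregate whose lowered type is unchanged by the pointer coercion above, so
// it is passed byref in the constant address space, roughly
//   ptr addrspace(4) byref(%struct.Params) align 8
// while a plain float * kernel parameter is passed directly as ptr addrspace(1).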
208
209ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
210 unsigned &NumRegsLeft) const {
211 assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
212
213 Ty = useFirstFieldIfTransparentUnion(Ty);
214
215 if (Variadic) {
216 return ABIArgInfo::getDirect(/*T=*/nullptr,
217 /*Offset=*/0,
218 /*Padding=*/nullptr,
219 /*CanBeFlattened=*/false,
220 /*Align=*/0);
221 }
222
223 if (isAggregateTypeForABI(Ty)) {
224 // Records with non-trivial destructors/copy-constructors should not be
225 // passed by value.
226 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
227 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
228
229 // Ignore empty structs/unions.
230 if (isEmptyRecord(getContext(), Ty, true))
231 return ABIArgInfo::getIgnore();
232
233 // Lower single-element structs to just pass a regular value. TODO: We
234 // could do reasonable-size multiple-element structs too, using getExpand(),
235 // though watch out for things like bitfields.
236 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
237 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
238
239 if (const RecordType *RT = Ty->getAs<RecordType>()) {
240 const RecordDecl *RD = RT->getDecl();
241 if (RD->hasFlexibleArrayMember())
242 return DefaultABIInfo::classifyArgumentType(Ty);
243 }
244
245 // Pack aggregates <= 8 bytes into single VGPR or pair.
246 uint64_t Size = getContext().getTypeSize(Ty);
247 if (Size <= 64) {
248 unsigned NumRegs = (Size + 31) / 32;
249 NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
250
251 if (Size <= 16)
252 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
253
254 if (Size <= 32)
255 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
256
257 // XXX: Should this be i64 instead, and should the limit increase?
258 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
259 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
260 }
261
262 if (NumRegsLeft > 0) {
263 unsigned NumRegs = numRegsForType(Ty);
264 if (NumRegsLeft >= NumRegs) {
265 NumRegsLeft -= NumRegs;
266 return ABIArgInfo::getDirect();
267 }
268 }
269
270 // Use pass-by-reference instead of pass-by-value for struct arguments in
271 // function ABI.
272 return ABIArgInfo::getIndirectAliased(
273 getContext().getTypeAlignInChars(Ty),
274 getContext().getTargetAddressSpace(LangAS::opencl_private));
275 }
276
277 // Otherwise just do the default thing.
278 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
279 if (!ArgInfo.isIndirect()) {
280 unsigned NumRegs = numRegsForType(Ty);
281 NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
282 }
283
284 return ArgInfo;
285}
286
287class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
288public:
289 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
290 : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
291
292 void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
293 CodeGenModule &CGM) const;
294
295 void emitTargetGlobals(CodeGen::CodeGenModule &CGM) const override;
296
297 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
298 CodeGen::CodeGenModule &M) const override;
299 unsigned getOpenCLKernelCallingConv() const override;
300
301 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
302 llvm::PointerType *T, QualType QT) const override;
303
304 LangAS getASTAllocaAddressSpace() const override {
305 return getLangASFromTargetAS(
306 getABIInfo().getDataLayout().getAllocaAddrSpace());
307 }
308 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
309 const VarDecl *D) const override;
310 llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
311 SyncScope Scope,
312 llvm::AtomicOrdering Ordering,
313 llvm::LLVMContext &Ctx) const override;
314 void setTargetAtomicMetadata(CodeGenFunction &CGF,
315 llvm::AtomicRMWInst &RMW) const override;
316 llvm::Value *createEnqueuedBlockKernel(CodeGenFunction &CGF,
317 llvm::Function *BlockInvokeFunc,
318 llvm::Type *BlockTy) const override;
319 bool shouldEmitStaticExternCAliases() const override;
320 bool shouldEmitDWARFBitFieldSeparators() const override;
321 void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
322};
323}
324
325static bool requiresAMDGPUProtectedVisibility(const Decl *D,
326 llvm::GlobalValue *GV) {
327 if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
328 return false;
329
330 return !D->hasAttr<OMPDeclareTargetDeclAttr>() &&
331 (D->hasAttr<OpenCLKernelAttr>() ||
332 (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
333 (isa<VarDecl>(D) &&
334 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
335 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
336 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())));
337}
338
339void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
340 const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
341 const auto *ReqdWGS =
342 M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
343 const bool IsOpenCLKernel =
344 M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>();
345 const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();
346
347 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
348 if (ReqdWGS || FlatWGS) {
349 M.handleAMDGPUFlatWorkGroupSizeAttr(F, FlatWGS, ReqdWGS);
350 } else if (IsOpenCLKernel || IsHIPKernel) {
351 // By default, restrict the maximum size to a value specified by
352 // --gpu-max-threads-per-block=n or its default value for HIP.
353 const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
354 const unsigned DefaultMaxWorkGroupSize =
355 IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
356 : M.getLangOpts().GPUMaxThreadsPerBlock;
357 std::string AttrVal =
358 std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
359 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
360 }
361
362 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>())
363 M.handleAMDGPUWavesPerEUAttr(F, Attr);
364
365 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
366 unsigned NumSGPR = Attr->getNumSGPR();
367
368 if (NumSGPR != 0)
369 F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
370 }
371
372 if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
373 uint32_t NumVGPR = Attr->getNumVGPR();
374
375 if (NumVGPR != 0)
376 F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
377 }
378
379 if (const auto *Attr = FD->getAttr<AMDGPUMaxNumWorkGroupsAttr>()) {
380 uint32_t X = Attr->getMaxNumWorkGroupsX()
381 ->EvaluateKnownConstInt(M.getContext())
382 .getExtValue();
383 // Y and Z dimensions default to 1 if not specified
384 uint32_t Y = Attr->getMaxNumWorkGroupsY()
385 ? Attr->getMaxNumWorkGroupsY()
386 ->EvaluateKnownConstInt(M.getContext())
387 .getExtValue()
388 : 1;
389 uint32_t Z = Attr->getMaxNumWorkGroupsZ()
390 ? Attr->getMaxNumWorkGroupsZ()
391 ->EvaluateKnownConstInt(M.getContext())
392 .getExtValue()
393 : 1;
394
395 llvm::SmallString<32> AttrVal;
396 llvm::raw_svector_ostream OS(AttrVal);
397 OS << X << ',' << Y << ',' << Z;
398
399 F->addFnAttr("amdgpu-max-num-workgroups", AttrVal.str());
400 }
401}
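// Illustrative attribute output (hypothetical kernels): an OpenCL kernel with
// no size attributes gets "amdgpu-flat-work-group-size"="1,256"; a HIP
// __global__ function gets "1,N" where N is --gpu-max-threads-per-block
// (1024 by default for HIP); and reqd_work_group_size(8, 8, 4) folds to a
// flat size of 256, yielding "amdgpu-flat-work-group-size"="256,256" via
// handleAMDGPUFlatWorkGroupSizeAttr below.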
402
403/// Emits control constants used to change per-architecture behaviour in the
404/// AMDGPU ROCm device libraries.
405void AMDGPUTargetCodeGenInfo::emitTargetGlobals(
406 CodeGen::CodeGenModule &CGM) const {
407 StringRef Name = "__oclc_ABI_version";
408 llvm::GlobalVariable *OriginalGV = CGM.getModule().getNamedGlobal(Name);
409 if (OriginalGV && !llvm::GlobalVariable::isExternalLinkage(OriginalGV->getLinkage()))
410 return;
411
412 if (CGM.getTarget().getTargetOpts().CodeObjectVersion ==
413 llvm::CodeObjectVersionKind::COV_None)
414 return;
415
416 auto *Type = llvm::IntegerType::getIntNTy(CGM.getModule().getContext(), 32);
417 llvm::Constant *COV = llvm::ConstantInt::get(
418 Type, CGM.getTarget().getTargetOpts().CodeObjectVersion);
419
420 // It needs to be constant weak_odr without externally_initialized so that
421 // the load instruction can be eliminated by IPSCCP.
422 auto *GV = new llvm::GlobalVariable(
423 CGM.getModule(), Type, true, llvm::GlobalValue::WeakODRLinkage, COV, Name,
424 nullptr, llvm::GlobalValue::ThreadLocalMode::NotThreadLocal,
425 CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant));
426 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Local);
427 GV->setVisibility(llvm::GlobalValue::VisibilityTypes::HiddenVisibility);
428
429 // Replace any external references to this variable with the new global.
430 if (OriginalGV) {
431 OriginalGV->replaceAllUsesWith(GV);
432 GV->takeName(OriginalGV);
433 OriginalGV->eraseFromParent();
434 }
435}
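// For illustration, when targeting code object v5 this emits roughly:
//   @__oclc_ABI_version = weak_odr hidden local_unnamed_addr addrspace(4) constant i32 500
// (the initializer is the numeric code-object version, 500 for v5).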
436
437void AMDGPUTargetCodeGenInfo::setTargetAttributes(
438 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
439 if (requiresAMDGPUProtectedVisibility(D, GV)) {
440 GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
441 GV->setDSOLocal(true);
442 }
443
444 if (GV->isDeclaration())
445 return;
446
447 llvm::Function *F = dyn_cast<llvm::Function>(GV);
448 if (!F)
449 return;
450
451 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
452 if (FD)
453 setFunctionDeclAttributes(FD, F, M);
454
455 if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
456 F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
457
458 if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
459 F->addFnAttr("amdgpu-ieee", "false");
460}
461
462unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
463 return llvm::CallingConv::AMDGPU_KERNEL;
464}
465
466// Currently LLVM assumes null pointers always have value 0,
467// which results in incorrectly transformed IR. Therefore, instead of
468// emitting null pointers in private and local address spaces, a null
469 // pointer in generic address space is emitted which is cast to a
470// pointer in local or private address space.
471llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
472 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
473 QualType QT) const {
474 if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
475 return llvm::ConstantPointerNull::get(PT);
476
477 auto &Ctx = CGM.getContext();
478 auto NPT = llvm::PointerType::get(
479 PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
480 return llvm::ConstantExpr::getAddrSpaceCast(
481 llvm::ConstantPointerNull::get(NPT), PT);
482}
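// For example, on AMDGPU the null value for the private and local address
// spaces is all-ones rather than 0, so a null pointer in the private address
// space (5) is emitted roughly as
//   addrspacecast (ptr null to ptr addrspace(5))
// i.e. the generic null pointer cast into the target address space.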
483
484LangAS
485AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
486 const VarDecl *D) const {
487 assert(!CGM.getLangOpts().OpenCL &&
488 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
489 "Address space agnostic languages only");
490 LangAS DefaultGlobalAS = getLangASFromTargetAS(
491 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
492 if (!D)
493 return DefaultGlobalAS;
494
495 LangAS AddrSpace = D->getType().getAddressSpace();
496 if (AddrSpace != LangAS::Default)
497 return AddrSpace;
498
499 // Only promote to address space 4 if VarDecl has constant initialization.
500 if (D->getType().isConstantStorage(CGM.getContext(), false, false) &&
501 D->hasConstantInitialization()) {
502 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
503 return *ConstAS;
504 }
505 return DefaultGlobalAS;
506}
507
508llvm::SyncScope::ID
509AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
510 SyncScope Scope,
511 llvm::AtomicOrdering Ordering,
512 llvm::LLVMContext &Ctx) const {
513 std::string Name;
514 switch (Scope) {
515 case SyncScope::HIPSingleThread:
516 case SyncScope::SingleScope:
517 Name = "singlethread";
518 break;
519 case SyncScope::HIPWavefront:
520 case SyncScope::OpenCLSubGroup:
521 case SyncScope::WavefrontScope:
522 Name = "wavefront";
523 break;
524 case SyncScope::HIPWorkgroup:
525 case SyncScope::OpenCLWorkGroup:
526 case SyncScope::WorkgroupScope:
527 Name = "workgroup";
528 break;
529 case SyncScope::HIPAgent:
530 case SyncScope::OpenCLDevice:
531 case SyncScope::DeviceScope:
532 Name = "agent";
533 break;
534 case SyncScope::SystemScope:
535 case SyncScope::HIPSystem:
536 case SyncScope::OpenCLAllSVMDevices:
537 Name = "";
538 break;
539 }
540
541 if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
542 if (!Name.empty())
543 Name = Twine(Twine(Name) + Twine("-")).str();
544
545 Name = Twine(Twine(Name) + Twine("one-as")).str();
546 }
547
548 return Ctx.getOrInsertSyncScopeID(Name);
549}
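// Illustrative mappings from the switch above: a workgroup-scoped HIP or
// OpenCL atomic with seq_cst ordering uses the "workgroup" sync scope; with
// any weaker ordering it becomes "workgroup-one-as"; a system-scope seq_cst
// atomic uses the default (empty) scope, and a relaxed one uses "one-as".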
550
551void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(
552 CodeGenFunction &CGF, llvm::AtomicRMWInst &RMW) const {
553 if (!CGF.getTarget().allowAMDGPUUnsafeFPAtomics())
554 return;
555
556 // TODO: Introduce new, more controlled options that also work for integers,
557 // and deprecate allowAMDGPUUnsafeFPAtomics.
558 llvm::AtomicRMWInst::BinOp RMWOp = RMW.getOperation();
559 if (llvm::AtomicRMWInst::isFPOperation(RMWOp)) {
560 llvm::MDNode *Empty = llvm::MDNode::get(CGF.getLLVMContext(), {});
561 RMW.setMetadata("amdgpu.no.fine.grained.memory", Empty);
562
563 if (RMWOp == llvm::AtomicRMWInst::FAdd && RMW.getType()->isFloatTy())
564 RMW.setMetadata("amdgpu.ignore.denormal.mode", Empty);
565 }
566}
567
568bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
569 return false;
570}
571
572bool AMDGPUTargetCodeGenInfo::shouldEmitDWARFBitFieldSeparators() const {
573 return true;
574}
575
576void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
577 const FunctionType *&FT) const {
578 FT = getABIInfo().getContext().adjustFunctionType(
579 FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
580}
581
582/// Create an OpenCL kernel for an enqueued block.
583///
584/// The type of the first argument (the block literal) is the struct type
585/// of the block literal instead of a pointer type. The first argument
586/// (block literal) is passed directly by value to the kernel. The kernel
587/// allocates the same type of struct on stack and stores the block literal
588/// to it and passes its pointer to the block invoke function. The kernel
589/// has "enqueued-block" function attribute and kernel argument metadata.
590llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
591 CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
592 auto &Builder = CGF.Builder;
593 auto &C = CGF.getLLVMContext();
594
595 auto *InvokeFT = Invoke->getFunctionType();
596 llvm::SmallVector<llvm::Type *, 2> ArgTys;
597 llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
598 llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
599 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
600 llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
601 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
602 llvm::SmallVector<llvm::Metadata *, 8> ArgNames;
603
604 ArgTys.push_back(BlockTy);
605 ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
606 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
607 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
608 ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
609 AccessQuals.push_back(llvm::MDString::get(C, "none"));
610 ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
611 for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
612 ArgTys.push_back(InvokeFT->getParamType(I));
613 ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
614 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
615 AccessQuals.push_back(llvm::MDString::get(C, "none"));
616 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
617 ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
618 ArgNames.push_back(
619 llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
620 }
621 std::string Name = Invoke->getName().str() + "_kernel";
622 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
623 auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
624 &CGF.CGM.getModule());
625 F->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
626
627 llvm::AttrBuilder KernelAttrs(C);
628 // FIXME: The invoke isn't applying the right attributes either
629 // FIXME: This is missing setTargetAttributes
630 CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
631 KernelAttrs.addAttribute("enqueued-block");
632 F->addFnAttrs(KernelAttrs);
633
634 auto IP = CGF.Builder.saveIP();
635 auto *BB = llvm::BasicBlock::Create(C, "entry", F);
636 Builder.SetInsertPoint(BB);
637 const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
638 auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
639 BlockPtr->setAlignment(BlockAlign);
640 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
641 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
642 llvm::SmallVector<llvm::Value *, 2> Args;
643 Args.push_back(Cast);
644 for (llvm::Argument &A : llvm::drop_begin(F->args()))
645 Args.push_back(&A);
646 llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
647 call->setCallingConv(Invoke->getCallingConv());
648 Builder.CreateRetVoid();
649 Builder.restoreIP(IP);
650
651 F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
652 F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
653 F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
654 F->setMetadata("kernel_arg_base_type",
655 llvm::MDNode::get(C, ArgBaseTypeNames));
656 F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
657 if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
658 F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));
659
660 return F;
661}
662
663void CodeGenModule::handleAMDGPUFlatWorkGroupSizeAttr(
664 llvm::Function *F, const AMDGPUFlatWorkGroupSizeAttr *FlatWGS,
665 const ReqdWorkGroupSizeAttr *ReqdWGS, int32_t *MinThreadsVal,
666 int32_t *MaxThreadsVal) {
667 unsigned Min = 0;
668 unsigned Max = 0;
669 if (FlatWGS) {
670 Min = FlatWGS->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
671 Max = FlatWGS->getMax()->EvaluateKnownConstInt(getContext()).getExtValue();
672 }
673 if (ReqdWGS && Min == 0 && Max == 0)
674 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
675
676 if (Min != 0) {
677 assert(Min <= Max && "Min must be less than or equal Max");
678
679 if (MinThreadsVal)
680 *MinThreadsVal = Min;
681 if (MaxThreadsVal)
682 *MaxThreadsVal = Max;
683 std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
684 if (F)
685 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
686 } else
687 assert(Max == 0 && "Max must be zero");
688}
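// For illustration: __attribute__((reqd_work_group_size(8, 8, 4))) folds to
// Min = Max = 256 and emits "amdgpu-flat-work-group-size"="256,256", while
// __attribute__((amdgpu_flat_work_group_size(64, 256))) emits
// "amdgpu-flat-work-group-size"="64,256".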
689
690void CodeGenModule::handleAMDGPUWavesPerEUAttr(
691 llvm::Function *F, const AMDGPUWavesPerEUAttr *Attr) {
692 unsigned Min =
693 Attr->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
694 unsigned Max =
695 Attr->getMax()
696 ? Attr->getMax()->EvaluateKnownConstInt(getContext()).getExtValue()
697 : 0;
698
699 if (Min != 0) {
700 assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");
701
702 std::string AttrVal = llvm::utostr(Min);
703 if (Max != 0)
704 AttrVal = AttrVal + "," + llvm::utostr(Max);
705 F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
706 } else
707 assert(Max == 0 && "Max must be zero");
708}
709
710std::unique_ptr<TargetCodeGenInfo>
711CodeGen::createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM) {
712 return std::make_unique<AMDGPUTargetCodeGenInfo>(CGM.getTypes());
713}