CGDecl.cpp
1//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Decl nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGBlocks.h"
14#include "CGCXXABI.h"
15#include "CGCleanup.h"
16#include "CGDebugInfo.h"
17#include "CGOpenCLRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CodeGenFunction.h"
20#include "CodeGenModule.h"
21#include "ConstantEmitter.h"
22#include "EHScopeStack.h"
23#include "PatternInit.h"
24#include "TargetInfo.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/CharUnits.h"
28#include "clang/AST/Decl.h"
29#include "clang/AST/DeclObjC.h"
35#include "clang/Sema/Sema.h"
36#include "llvm/Analysis/ConstantFolding.h"
37#include "llvm/Analysis/ValueTracking.h"
38#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/GlobalVariable.h"
40#include "llvm/IR/Instructions.h"
41#include "llvm/IR/Intrinsics.h"
42#include "llvm/IR/Type.h"
43#include <optional>
44
45using namespace clang;
46using namespace CodeGen;
47
48static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
49 "Clang max alignment greater than what LLVM supports?");
50
51void CodeGenFunction::EmitDecl(const Decl &D) {
52 switch (D.getKind()) {
53 case Decl::BuiltinTemplate:
54 case Decl::TranslationUnit:
55 case Decl::ExternCContext:
56 case Decl::Namespace:
57 case Decl::UnresolvedUsingTypename:
58 case Decl::ClassTemplateSpecialization:
59 case Decl::ClassTemplatePartialSpecialization:
60 case Decl::VarTemplateSpecialization:
61 case Decl::VarTemplatePartialSpecialization:
62 case Decl::TemplateTypeParm:
63 case Decl::UnresolvedUsingValue:
64 case Decl::NonTypeTemplateParm:
65 case Decl::CXXDeductionGuide:
66 case Decl::CXXMethod:
67 case Decl::CXXConstructor:
68 case Decl::CXXDestructor:
69 case Decl::CXXConversion:
70 case Decl::Field:
71 case Decl::MSProperty:
72 case Decl::IndirectField:
73 case Decl::ObjCIvar:
74 case Decl::ObjCAtDefsField:
75 case Decl::ParmVar:
76 case Decl::ImplicitParam:
77 case Decl::ClassTemplate:
78 case Decl::VarTemplate:
79 case Decl::FunctionTemplate:
80 case Decl::TypeAliasTemplate:
81 case Decl::TemplateTemplateParm:
82 case Decl::ObjCMethod:
83 case Decl::ObjCCategory:
84 case Decl::ObjCProtocol:
85 case Decl::ObjCInterface:
86 case Decl::ObjCCategoryImpl:
87 case Decl::ObjCImplementation:
88 case Decl::ObjCProperty:
89 case Decl::ObjCCompatibleAlias:
90 case Decl::PragmaComment:
91 case Decl::PragmaDetectMismatch:
92 case Decl::AccessSpec:
93 case Decl::LinkageSpec:
94 case Decl::Export:
95 case Decl::ObjCPropertyImpl:
96 case Decl::FileScopeAsm:
97 case Decl::TopLevelStmt:
98 case Decl::Friend:
99 case Decl::FriendTemplate:
100 case Decl::Block:
101 case Decl::Captured:
102 case Decl::UsingShadow:
103 case Decl::ConstructorUsingShadow:
104 case Decl::ObjCTypeParam:
105 case Decl::Binding:
106 case Decl::UnresolvedUsingIfExists:
107 case Decl::HLSLBuffer:
108 llvm_unreachable("Declaration should not be in declstmts!");
109 case Decl::Record: // struct/union/class X;
110 case Decl::CXXRecord: // struct/union/class X; [C++]
111 if (CGDebugInfo *DI = getDebugInfo())
112 if (cast<RecordDecl>(D).getDefinition())
113 DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
114 return;
115 case Decl::Enum: // enum X;
116 if (CGDebugInfo *DI = getDebugInfo())
117 if (cast<EnumDecl>(D).getDefinition())
118 DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
119 return;
120 case Decl::Function: // void X();
121 case Decl::EnumConstant: // enum ? { X = ? }
122 case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
123 case Decl::Label: // __label__ x;
124 case Decl::Import:
125 case Decl::MSGuid: // __declspec(uuid("..."))
126 case Decl::UnnamedGlobalConstant:
127 case Decl::TemplateParamObject:
128 case Decl::OMPThreadPrivate:
129 case Decl::OMPAllocate:
130 case Decl::OMPCapturedExpr:
131 case Decl::OMPRequires:
132 case Decl::Empty:
133 case Decl::Concept:
134 case Decl::ImplicitConceptSpecialization:
135 case Decl::LifetimeExtendedTemporary:
136 case Decl::RequiresExprBody:
137 // None of these decls require codegen support.
138 return;
139
140 case Decl::NamespaceAlias:
141 if (CGDebugInfo *DI = getDebugInfo())
142 DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
143 return;
144 case Decl::Using: // using X; [C++]
145 if (CGDebugInfo *DI = getDebugInfo())
146 DI->EmitUsingDecl(cast<UsingDecl>(D));
147 return;
148 case Decl::UsingEnum: // using enum X; [C++]
149 if (CGDebugInfo *DI = getDebugInfo())
150 DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
151 return;
152 case Decl::UsingPack:
153 for (auto *Using : cast<UsingPackDecl>(D).expansions())
154 EmitDecl(*Using);
155 return;
156 case Decl::UsingDirective: // using namespace X; [C++]
157 if (CGDebugInfo *DI = getDebugInfo())
158 DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
159 return;
160 case Decl::Var:
161 case Decl::Decomposition: {
162 const VarDecl &VD = cast<VarDecl>(D);
163 assert(VD.isLocalVarDecl() &&
164 "Should not see file-scope variables inside a function!");
165 EmitVarDecl(VD);
166 if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
167 for (auto *B : DD->bindings())
168 if (auto *HD = B->getHoldingVar())
169 EmitVarDecl(*HD);
170 return;
171 }
172
173 case Decl::OMPDeclareReduction:
174 return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);
175
176 case Decl::OMPDeclareMapper:
177 return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);
178
179 case Decl::Typedef: // typedef int X;
180 case Decl::TypeAlias: { // using X = int; [C++0x]
181 QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
182 if (CGDebugInfo *DI = getDebugInfo())
183 DI->EmitAndRetainType(Ty);
184 if (Ty->isVariablyModifiedType())
185 EmitVariablyModifiedType(Ty);
186 return;
187 }
188 }
189}
190
191/// EmitVarDecl - This method handles emission of any variable declaration
192/// inside a function, including static vars etc.
193void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
194 if (D.hasExternalStorage())
195 // Don't emit it now, allow it to be emitted lazily on its first use.
196 return;
197
198 // Some function-scope variables do not have static storage but still
199 // need to be emitted like static variables, e.g. a function-scope
200 // variable in constant address space in OpenCL.
201 if (D.getStorageDuration() != SD_Automatic) {
202 // Static sampler variables translated to function calls.
203 if (D.getType()->isSamplerT())
204 return;
205
206 llvm::GlobalValue::LinkageTypes Linkage =
207 CGM.getLLVMLinkageVarDefinition(&D);
208
209 // FIXME: We need to force the emission/use of a guard variable for
210 // some variables even if we can constant-evaluate them because
211 // we can't guarantee every translation unit will constant-evaluate them.
212
213 return EmitStaticVarDecl(D, Linkage);
214 }
215
216 if (D.getType().getAddressSpace() == LangAS::opencl_local)
217 return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
218
219 assert(D.hasLocalStorage());
220 return EmitAutoVarDecl(D);
221}
222
223static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
224 if (CGM.getLangOpts().CPlusPlus)
225 return CGM.getMangledName(&D).str();
226
227 // If this isn't C++, we don't need a mangled name, just a pretty one.
228 assert(!D.isExternallyVisible() && "name shouldn't matter");
229 std::string ContextName;
230 const DeclContext *DC = D.getDeclContext();
231 if (auto *CD = dyn_cast<CapturedDecl>(DC))
232 DC = cast<DeclContext>(CD->getNonClosureContext());
233 if (const auto *FD = dyn_cast<FunctionDecl>(DC))
234 ContextName = std::string(CGM.getMangledName(FD));
235 else if (const auto *BD = dyn_cast<BlockDecl>(DC))
236 ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
237 else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
238 ContextName = OMD->getSelector().getAsString();
239 else
240 llvm_unreachable("Unknown context for static var decl");
241
242 ContextName += "." + D.getNameAsString();
243 return ContextName;
244}
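// Illustrative example (hypothetical user code, not from this file): for
//   void foo(void) { static int n = 0; }
// compiled as C, the scheme above names the global "foo.n"; in C++ the
// variable instead gets its mangled name (roughly "_ZZ3foovE1n" with the
// Itanium ABI).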
245
246llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
247 const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
248 // In general, we don't always emit static var decls once before we reference
249 // them. It is possible to reference them before emitting the function that
250 // contains them, and it is possible to emit the containing function multiple
251 // times.
252 if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
253 return ExistingGV;
254
255 QualType Ty = D.getType();
256 assert(Ty->isConstantSizeType() && "VLAs can't be static");
257
258 // Use the label if the variable is renamed with the asm-label extension.
259 std::string Name;
260 if (D.hasAttr<AsmLabelAttr>())
261 Name = std::string(getMangledName(&D));
262 else
263 Name = getStaticDeclName(*this, D);
264
265 llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
266 LangAS AS = GetGlobalVarAddressSpace(&D);
267 unsigned TargetAS = getContext().getTargetAddressSpace(AS);
268
269 // OpenCL variables in local address space and CUDA shared
270 // variables cannot have an initializer.
271 llvm::Constant *Init = nullptr;
272 if (Ty.getAddressSpace() == LangAS::opencl_local ||
273 D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
274 Init = llvm::UndefValue::get(LTy);
275 else
276 Init = EmitNullConstant(Ty);
277
278 llvm::GlobalVariable *GV = new llvm::GlobalVariable(
279 getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
280 nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
281 GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());
282
283 if (supportsCOMDAT() && GV->isWeakForLinker())
284 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
285
286 if (D.getTLSKind())
287 setTLSMode(GV, D);
288
289 setGVProperties(GV, &D);
290 getTargetCodeGenInfo().setTargetAttributes(cast<Decl>(&D), GV, *this);
291
292 // Make sure the result is of the correct type.
293 LangAS ExpectedAS = Ty.getAddressSpace();
294 llvm::Constant *Addr = GV;
295 if (AS != ExpectedAS) {
296 Addr = getTargetCodeGenInfo().performAddrSpaceCast(
297 *this, GV, AS, ExpectedAS,
298 llvm::PointerType::get(getLLVMContext(),
299 getContext().getTargetAddressSpace(ExpectedAS)));
300 }
301
302 setStaticLocalDeclAddress(&D, Addr);
303
304 // Ensure that the static local gets initialized by making sure the parent
305 // function gets emitted eventually.
306 const Decl *DC = cast<Decl>(D.getDeclContext());
307
308 // We can't name blocks or captured statements directly, so try to emit their
309 // parents.
310 if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
311 DC = DC->getNonClosureContext();
312 // FIXME: Ensure that global blocks get emitted.
313 if (!DC)
314 return Addr;
315 }
316
317 GlobalDecl GD;
318 if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
319 GD = GlobalDecl(CD, Ctor_Base);
320 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
321 GD = GlobalDecl(DD, Dtor_Base);
322 else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
323 GD = GlobalDecl(FD);
324 else {
325 // Don't do anything for Obj-C method decls or global closures. We should
326 // never defer them.
327 assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
328 }
329 if (GD.getDecl()) {
330 // Disable emission of the parent function for the OpenMP device codegen.
331 CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
332 (void)GetAddrOfGlobal(GD);
333 }
334
335 return Addr;
336}
337
338/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
339/// global variable that has already been created for it. If the initializer
340/// has a different type than GV does, this may free GV and return a different
341/// one. Otherwise it just returns GV.
342llvm::GlobalVariable *
343CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
344 llvm::GlobalVariable *GV) {
345 ConstantEmitter emitter(*this);
346 llvm::Constant *Init = emitter.tryEmitForInitializer(D);
347
348 // If constant emission failed, then this should be a C++ static
349 // initializer.
350 if (!Init) {
351 if (!getLangOpts().CPlusPlus)
352 CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
353 else if (D.hasFlexibleArrayInit(getContext()))
354 CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
355 else if (HaveInsertPoint()) {
356 // Since we have a static initializer, this global variable can't
357 // be constant.
358 GV->setConstant(false);
359
360 EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
361 }
362 return GV;
363 }
364
365#ifndef NDEBUG
366 CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
367 D.getFlexibleArrayInitChars(getContext());
368 CharUnits CstSize = CharUnits::fromQuantity(
369 CGM.getDataLayout().getTypeAllocSize(Init->getType()));
370 assert(VarSize == CstSize && "Emitted constant has unexpected size");
371#endif
372
373 bool NeedsDtor =
374 D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
375
376 GV->setConstant(
377 D.getType().isConstantStorage(getContext(), true, !NeedsDtor));
378 GV->replaceInitializer(Init);
379
380 emitter.finalize(GV);
381
382 if (NeedsDtor && HaveInsertPoint()) {
383 // We have a constant initializer, but a nontrivial destructor. We still
384 // need to perform a guarded "initialization" in order to register the
385 // destructor.
386 EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
387 }
388
389 return GV;
390}
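// Illustrative example (hypothetical user code): inside a C++ function,
//   static const int a = 42;     // constant emission succeeds; the global
//                                // keeps its initializer and stays constant
//   static const int b = rand(); // constant emission fails; falls back to
//                                // EmitCXXGuardedInit with PerformInit=true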
391
392void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
393 llvm::GlobalValue::LinkageTypes Linkage) {
394 // Check to see if we already have a global variable for this
395 // declaration. This can happen when double-emitting function
396 // bodies, e.g. with complete and base constructors.
397 llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
398 CharUnits alignment = getContext().getDeclAlign(&D);
399
400 // Store into LocalDeclMap before generating initializer to handle
401 // circular references.
402 llvm::Type *elemTy = ConvertTypeForMem(D.getType());
403 setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));
404
405 // We can't have a VLA here, but we can have a pointer to a VLA,
406 // even though that doesn't really make any sense.
407 // Make sure to evaluate VLA bounds now so that we have them for later.
408 if (D.getType()->isVariablyModifiedType())
409 EmitVariablyModifiedType(D.getType());
410
411 // Save the type in case adding the initializer forces a type change.
412 llvm::Type *expectedType = addr->getType();
413
414 llvm::GlobalVariable *var =
415 cast<llvm::GlobalVariable>(addr->stripPointerCasts());
416
417 // CUDA's local and local static __shared__ variables should not
418 // have any non-empty initializers. This is ensured by Sema.
419 // Whatever initializer such variable may have when it gets here is
420 // a no-op and should not be emitted.
421 bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
422 D.hasAttr<CUDASharedAttr>();
423 // If this value has an initializer, emit it.
424 if (D.getInit() && !isCudaSharedVar)
425 var = AddInitializerToStaticVarDecl(D, var);
426
427 var->setAlignment(alignment.getAsAlign());
428
429 if (D.hasAttr<AnnotateAttr>())
430 CGM.AddGlobalAnnotations(&D, var);
431
432 if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
433 var->addAttribute("bss-section", SA->getName());
434 if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
435 var->addAttribute("data-section", SA->getName());
436 if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
437 var->addAttribute("rodata-section", SA->getName());
438 if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
439 var->addAttribute("relro-section", SA->getName());
440
441 if (const SectionAttr *SA = D.getAttr<SectionAttr>())
442 var->setSection(SA->getName());
443
444 if (D.hasAttr<RetainAttr>())
445 CGM.addUsedGlobal(var);
446 else if (D.hasAttr<UsedAttr>())
447 CGM.addUsedOrCompilerUsedGlobal(var);
448
449 if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
450 CGM.addUsedOrCompilerUsedGlobal(var);
451
452 // We may have to cast the constant because of the initializer
453 // mismatch above.
454 //
455 // FIXME: It is really dangerous to store this in the map; if anyone
456 // RAUW's the GV uses of this constant will be invalid.
457 llvm::Constant *castedAddr =
458 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
459 LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
460 CGM.setStaticLocalDeclAddress(&D, castedAddr);
461
461
462 CGM.getSanitizerMetadata()->reportGlobal(var, D);
463
464 // Emit global variable debug descriptor for static vars.
465 CGDebugInfo *DI = getDebugInfo();
466 if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
467 DI->setLocation(D.getLocation());
468 DI->EmitGlobalVariable(var, &D);
469 }
470}
471
472namespace {
473 struct DestroyObject final : EHScopeStack::Cleanup {
474 DestroyObject(Address addr, QualType type,
475 CodeGenFunction::Destroyer *destroyer,
476 bool useEHCleanupForArray)
477 : addr(addr), type(type), destroyer(destroyer),
478 useEHCleanupForArray(useEHCleanupForArray) {}
479
480 Address addr;
481 QualType type;
482 CodeGenFunction::Destroyer *destroyer;
483 bool useEHCleanupForArray;
484
485 void Emit(CodeGenFunction &CGF, Flags flags) override {
486 // Don't use an EH cleanup recursively from an EH cleanup.
487 bool useEHCleanupForArray =
488 flags.isForNormalCleanup() && this->useEHCleanupForArray;
489
490 CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
491 }
492 };
493
494 template <class Derived>
495 struct DestroyNRVOVariable : EHScopeStack::Cleanup {
496 DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
497 : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}
498
499 llvm::Value *NRVOFlag;
500 Address Loc;
501 QualType Ty;
502
503 void Emit(CodeGenFunction &CGF, Flags flags) override {
504 // Along the exceptions path we always execute the dtor.
505 bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
506
507 llvm::BasicBlock *SkipDtorBB = nullptr;
508 if (NRVO) {
509 // If we exited via NRVO, we skip the destructor call.
510 llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
511 SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
512 llvm::Value *DidNRVO =
513 CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
514 CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
515 CGF.EmitBlock(RunDtorBB);
516 }
517
518 static_cast<Derived *>(this)->emitDestructorCall(CGF);
519
520 if (NRVO) CGF.EmitBlock(SkipDtorBB);
521 }
522
523 virtual ~DestroyNRVOVariable() = default;
524 };
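// Illustrative example (hypothetical user code): for
//   X f() { X x; use(x); return x; }
// when the named return value optimization fires, `x` is constructed directly
// in the return slot; the flag loaded above is true on that return path, so
// the cleanup branches to "nrvo.skipdtor" and skips the destructor call.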
525
526 struct DestroyNRVOVariableCXX final
527 : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
528 DestroyNRVOVariableCXX(Address addr, QualType type,
529 const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
530 : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
531 Dtor(Dtor) {}
532
533 const CXXDestructorDecl *Dtor;
534
535 void emitDestructorCall(CodeGenFunction &CGF) {
536 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
537 /*ForVirtualBase=*/false,
538 /*Delegating=*/false, Loc, Ty);
539 }
540 };
541
542 struct DestroyNRVOVariableC final
543 : DestroyNRVOVariable<DestroyNRVOVariableC> {
544 DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
545 : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}
546
547 void emitDestructorCall(CodeGenFunction &CGF) {
548 CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
549 }
550 };
551
552 struct CallStackRestore final : EHScopeStack::Cleanup {
553 Address Stack;
554 CallStackRestore(Address Stack) : Stack(Stack) {}
555 bool isRedundantBeforeReturn() override { return true; }
556 void Emit(CodeGenFunction &CGF, Flags flags) override {
557 llvm::Value *V = CGF.Builder.CreateLoad(Stack);
558 CGF.Builder.CreateStackRestore(V);
559 }
560 };
561
562 struct KmpcAllocFree final : EHScopeStack::Cleanup {
563 std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
564 KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
565 : AddrSizePair(AddrSizePair) {}
566 void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
567 auto &RT = CGF.CGM.getOpenMPRuntime();
568 RT.getKmpcFreeShared(CGF, AddrSizePair);
569 }
570 };
571
572 struct ExtendGCLifetime final : EHScopeStack::Cleanup {
573 const VarDecl &Var;
574 ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
575
576 void Emit(CodeGenFunction &CGF, Flags flags) override {
577 // Compute the address of the local variable, in case it's a
578 // byref or something.
579 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
580 Var.getType(), VK_LValue, SourceLocation());
581 llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
582 SourceLocation());
583 CGF.EmitExtendGCLifetime(value);
584 }
585 };
586
587 struct CallCleanupFunction final : EHScopeStack::Cleanup {
588 llvm::Constant *CleanupFn;
589 const CGFunctionInfo &FnInfo;
590 const VarDecl &Var;
591
592 CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
593 const VarDecl *Var)
594 : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
595
596 void Emit(CodeGenFunction &CGF, Flags flags) override {
597 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
598 Var.getType(), VK_LValue, SourceLocation());
599 // Compute the address of the local variable, in case it's a byref
600 // or something.
601 llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);
602
603 // In some cases, the type of the function argument will be different from
604 // the type of the pointer. An example of this is
605 // void f(void* arg);
606 // __attribute__((cleanup(f))) void *g;
607 //
608 // To fix this we insert a bitcast here.
609 QualType ArgTy = FnInfo.arg_begin()->type;
610 llvm::Value *Arg =
611 CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
612
613 CallArgList Args;
614 Args.add(RValue::get(Arg),
615 CGF.getContext().getPointerType(Var.getType()));
616 auto Callee = CGCallee::forDirect(CleanupFn);
617 CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
618 }
619 };
620} // end anonymous namespace
621
622/// EmitAutoVarWithLifetime - Does the setup required for an automatic
623/// variable with lifetime.
624static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
625 Address addr,
626 Qualifiers::ObjCLifetime lifetime) {
627 switch (lifetime) {
629 llvm_unreachable("present but none");
630
632 // nothing to do
633 break;
634
636 CodeGenFunction::Destroyer *destroyer =
637 (var.hasAttr<ObjCPreciseLifetimeAttr>()
640
641 CleanupKind cleanupKind = CGF.getARCCleanupKind();
642 CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
643 cleanupKind & EHCleanup);
644 break;
645 }
646 case Qualifiers::OCL_Autoreleasing:
647 // nothing to do
648 break;
649
650 case Qualifiers::OCL_Weak:
651 // __weak objects always get EH cleanups; otherwise, exceptions
652 // could cause really nasty crashes instead of mere leaks.
653 CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
654 CodeGenFunction::destroyARCWeak,
655 /*useEHCleanup*/ true);
656 break;
657 }
658}
659
660static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
661 if (const Expr *e = dyn_cast<Expr>(s)) {
662 // Skip the most common kinds of expressions that make
663 // hierarchy-walking expensive.
664 s = e = e->IgnoreParenCasts();
665
666 if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
667 return (ref->getDecl() == &var);
668 if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
669 const BlockDecl *block = be->getBlockDecl();
670 for (const auto &I : block->captures()) {
671 if (I.getVariable() == &var)
672 return true;
673 }
674 }
675 }
676
677 for (const Stmt *SubStmt : s->children())
678 // SubStmt might be null; as in missing decl or conditional of an if-stmt.
679 if (SubStmt && isAccessedBy(var, SubStmt))
680 return true;
681
682 return false;
683}
684
685static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
686 if (!decl) return false;
687 if (!isa<VarDecl>(decl)) return false;
688 const VarDecl *var = cast<VarDecl>(decl);
689 return isAccessedBy(*var, e);
690}
691
691
692static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
693 const LValue &destLV, const Expr *init) {
694 bool needsCast = false;
695
696 while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
697 switch (castExpr->getCastKind()) {
698 // Look through casts that don't require representation changes.
699 case CK_NoOp:
700 case CK_BitCast:
701 case CK_BlockPointerToObjCPointerCast:
702 needsCast = true;
703 break;
704
705 // If we find an l-value to r-value cast from a __weak variable,
706 // emit this operation as a copy or move.
707 case CK_LValueToRValue: {
708 const Expr *srcExpr = castExpr->getSubExpr();
709 if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
710 return false;
711
712 // Emit the source l-value.
713 LValue srcLV = CGF.EmitLValue(srcExpr);
714
715 // Handle a formal type change to avoid asserting.
716 auto srcAddr = srcLV.getAddress();
717 if (needsCast) {
718 srcAddr = srcAddr.withElementType(destLV.getAddress().getElementType());
719 }
720
721 // If it was an l-value, use objc_copyWeak.
722 if (srcExpr->isLValue()) {
723 CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
724 } else {
725 assert(srcExpr->isXValue());
726 CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
727 }
728 return true;
729 }
730
731 // Stop at anything else.
732 default:
733 return false;
734 }
735
736 init = castExpr->getSubExpr();
737 }
738 return false;
739}
740
741static void drillIntoBlockVariable(CodeGenFunction &CGF,
742 LValue &lvalue,
743 const VarDecl *var) {
744 lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
745}
746
747void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
748 SourceLocation Loc) {
749 if (!SanOpts.has(SanitizerKind::NullabilityAssign))
750 return;
751
752 auto Nullability = LHS.getType()->getNullability();
753 if (!Nullability || *Nullability != NullabilityKind::NonNull)
754 return;
755
756 // Check if the right hand side of the assignment is nonnull, if the left
757 // hand side must be nonnull.
758 SanitizerScope SanScope(this);
759 llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
760 llvm::Constant *StaticData[] = {
761 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
762 llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
763 llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
764 EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
765 SanitizerHandler::TypeMismatch, StaticData, RHS);
766}
767
768void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
769 LValue lvalue, bool capturedByInit) {
770 Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
771 if (!lifetime) {
772 llvm::Value *value = EmitScalarExpr(init);
773 if (capturedByInit)
774 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
775 EmitNullabilityCheck(lvalue, value, init->getExprLoc());
776 EmitStoreThroughLValue(RValue::get(value), lvalue, true);
777 return;
778 }
779
780 if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
781 init = DIE->getExpr();
782
783 // If we're emitting a value with lifetime, we have to do the
784 // initialization *before* we leave the cleanup scopes.
785 if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
786 CodeGenFunction::RunCleanupsScope Scope(*this);
787 return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
788 }
789
790 // We have to maintain the illusion that the variable is
791 // zero-initialized. If the variable might be accessed in its
792 // initializer, zero-initialize before running the initializer, then
793 // actually perform the initialization with an assign.
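  // Illustrative example (hypothetical user code): in
  //   __strong id obj = ^{ return obj; }();
  // the block in the initializer captures `obj`, so accessedByInit is true
  // below and a null value is stored into `obj` before the initializer runs.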
794 bool accessedByInit = false;
795 if (lifetime != Qualifiers::OCL_ExplicitNone)
796 accessedByInit = (capturedByInit || isAccessedBy(D, init));
797 if (accessedByInit) {
798 LValue tempLV = lvalue;
799 // Drill down to the __block object if necessary.
800 if (capturedByInit) {
801 // We can use a simple GEP for this because it can't have been
802 // moved yet.
803 tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
804 cast<VarDecl>(D),
805 /*follow*/ false));
806 }
807
808 auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
809 llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());
810
811 // If __weak, we want to use a barrier under certain conditions.
812 if (lifetime == Qualifiers::OCL_Weak)
813 EmitARCInitWeak(tempLV.getAddress(), zero);
814
815 // Otherwise just do a simple store.
816 else
817 EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
818 }
819
820 // Emit the initializer.
821 llvm::Value *value = nullptr;
822
823 switch (lifetime) {
825 llvm_unreachable("present but none");
826
828 if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
829 value = EmitARCRetainScalarExpr(init);
830 break;
831 }
832 // If D is pseudo-strong, treat it like __unsafe_unretained here. This means
833 // that we omit the retain, and causes non-autoreleased return values to be
834 // immediately released.
835 [[fallthrough]];
836 }
837
838 case Qualifiers::OCL_ExplicitNone:
839 value = EmitARCUnsafeUnretainedScalarExpr(init);
840 break;
841
842 case Qualifiers::OCL_Weak: {
843 // If it's not accessed by the initializer, try to emit the
844 // initialization with a copy or move.
845 if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
846 return;
847 }
848
849 // No way to optimize a producing initializer into this. It's not
850 // worth optimizing for, because the value will immediately
851 // disappear in the common case.
852 value = EmitScalarExpr(init);
853
854 if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
855 if (accessedByInit)
856 EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
857 else
858 EmitARCInitWeak(lvalue.getAddress(), value);
859 return;
860 }
861
862 case Qualifiers::OCL_Autoreleasing:
863 value = EmitARCRetainAutoreleaseScalarExpr(init);
864 break;
865 }
866
867 if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
868
869 EmitNullabilityCheck(lvalue, value, init->getExprLoc());
870
871 // If the variable might have been accessed by its initializer, we
872 // might have to initialize with a barrier. We have to do this for
873 // both __weak and __strong, but __weak got filtered out above.
874 if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
875 llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
876 EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
877 EmitARCRelease(oldValue, ARCImpreciseLifetime);
878 return;
879 }
880
881 EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
882}
883
884/// Decide whether we can emit the non-zero parts of the specified initializer
885/// with equal or fewer than NumStores scalar stores.
886static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
887 unsigned &NumStores) {
888 // Zero and Undef never requires any extra stores.
889 if (isa<llvm::ConstantAggregateZero>(Init) ||
890 isa<llvm::ConstantPointerNull>(Init) ||
891 isa<llvm::UndefValue>(Init))
892 return true;
893 if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
894 isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
895 isa<llvm::ConstantExpr>(Init))
896 return Init->isNullValue() || NumStores--;
897
898 // See if we can emit each element.
899 if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
900 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
901 llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
902 if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
903 return false;
904 }
905 return true;
906 }
907
908 if (llvm::ConstantDataSequential *CDS =
909 dyn_cast<llvm::ConstantDataSequential>(Init)) {
910 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
911 llvm::Constant *Elt = CDS->getElementAsConstant(i);
912 if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
913 return false;
914 }
915 return true;
916 }
917
918 // Anything else is hard and scary.
919 return false;
920}
921
922/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
923/// the scalar stores that would be required.
924static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
925 llvm::Constant *Init, Address Loc,
926 bool isVolatile, CGBuilderTy &Builder,
927 bool IsAutoInit) {
928 assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
929 "called emitStoresForInitAfterBZero for zero or undef value.");
930
931 if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
932 isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
933 isa<llvm::ConstantExpr>(Init)) {
934 auto *I = Builder.CreateStore(Init, Loc, isVolatile);
935 if (IsAutoInit)
936 I->addAnnotationMetadata("auto-init");
937 return;
938 }
939
940 if (llvm::ConstantDataSequential *CDS =
941 dyn_cast<llvm::ConstantDataSequential>(Init)) {
942 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
943 llvm::Constant *Elt = CDS->getElementAsConstant(i);
944
945 // If necessary, get a pointer to the element and emit it.
946 if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
947 emitStoresForInitAfterBZero(
948 CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
949 Builder, IsAutoInit);
950 }
951 return;
952 }
953
954 assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
955 "Unknown value type!");
956
957 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
958 llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
959
960 // If necessary, get a pointer to the element and emit it.
961 if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
962 emitStoresForInitAfterBZero(CGM, Elt,
963 Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
964 isVolatile, Builder, IsAutoInit);
965 }
966}
967
968/// Decide whether we should use bzero plus some stores to initialize a local
969/// variable instead of using a memcpy from a constant global. It is beneficial
970/// to use bzero if the global is all zeros, or mostly zeros and large.
971static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
972 uint64_t GlobalSize) {
973 // If a global is all zeros, always use a bzero.
974 if (isa<llvm::ConstantAggregateZero>(Init)) return true;
975
976 // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
977 // do it if it will require 6 or fewer scalar stores.
978 // TODO: Should the budget depend on the size? Avoiding a large global warrants
979 // plopping in more stores.
980 unsigned StoreBudget = 6;
981 uint64_t SizeLimit = 32;
982
983 return GlobalSize > SizeLimit &&
984 canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
985}
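// Illustrative example (hypothetical user code): for
//   int big[100] = {0, 0, 7};
// the constant is larger than 32 bytes and almost entirely zero, so the local
// is initialized with a memset of 0 over the whole array followed by a single
// scalar store of the element whose value is 7.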
986
987/// Decide whether we should use memset to initialize a local variable instead
988/// of using a memcpy from a constant global. Assumes we've already decided to
989/// not use bzero.
990/// FIXME We could be more clever, as we are for bzero above, and generate
991/// memset followed by stores. It's unclear that's worth the effort.
992static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
993 uint64_t GlobalSize,
994 const llvm::DataLayout &DL) {
995 uint64_t SizeLimit = 32;
996 if (GlobalSize <= SizeLimit)
997 return nullptr;
998 return llvm::isBytewiseValue(Init, DL);
999}
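// Illustrative example (hypothetical user code): a 64-byte array whose bytes
// are all 0x2A is recognized by isBytewiseValue(), so the initialization is
// lowered to one memset with value 0x2A instead of a memcpy from a global.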
1000
1001/// Decide whether we want to split a constant structure or array store into a
1002/// sequence of its fields' stores. This may cost us code size and compilation
1003/// speed, but plays better with store optimizations.
1004static bool shouldSplitConstantStore(CodeGenModule &CGM,
1005 uint64_t GlobalByteSize) {
1006 // Don't break things that occupy more than one cacheline.
1007 uint64_t ByteSizeLimit = 64;
1008 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1009 return false;
1010 if (GlobalByteSize <= ByteSizeLimit)
1011 return true;
1012 return false;
1013}
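// Illustrative example: at -O1 and above, a constant initializer for a struct
// that fits in 64 bytes can be emitted as individual field stores (see
// emitStoresForConstant below) instead of a memcpy from a global constant,
// which later store-to-load forwarding handles better.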
1014
1015enum class IsPattern { No, Yes };
1016
1017/// Generate a constant filled with either a pattern or zeroes.
1018static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
1019 llvm::Type *Ty) {
1020 if (isPattern == IsPattern::Yes)
1021 return initializationPatternFor(CGM, Ty);
1022 else
1023 return llvm::Constant::getNullValue(Ty);
1024}
1025
1026static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1027 llvm::Constant *constant);
1028
1029/// Helper function for constWithPadding() to deal with padding in structures.
1030static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
1031 IsPattern isPattern,
1032 llvm::StructType *STy,
1033 llvm::Constant *constant) {
1034 const llvm::DataLayout &DL = CGM.getDataLayout();
1035 const llvm::StructLayout *Layout = DL.getStructLayout(STy);
1036 llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
1037 unsigned SizeSoFar = 0;
1038 SmallVector<llvm::Constant *, 8> Values;
1039 bool NestedIntact = true;
1040 for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
1041 unsigned CurOff = Layout->getElementOffset(i);
1042 if (SizeSoFar < CurOff) {
1043 assert(!STy->isPacked());
1044 auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
1045 Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1046 }
1047 llvm::Constant *CurOp;
1048 if (constant->isZeroValue())
1049 CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
1050 else
1051 CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
1052 auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
1053 if (CurOp != NewOp)
1054 NestedIntact = false;
1055 Values.push_back(NewOp);
1056 SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
1057 }
1058 unsigned TotalSize = Layout->getSizeInBytes();
1059 if (SizeSoFar < TotalSize) {
1060 auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
1061 Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1062 }
1063 if (NestedIntact && Values.size() == STy->getNumElements())
1064 return constant;
1065 return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
1066}
1067
1068/// Replace all padding bytes in a given constant with either a pattern byte or
1069/// 0x00.
1070static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1071 llvm::Constant *constant) {
1072 llvm::Type *OrigTy = constant->getType();
1073 if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
1074 return constStructWithPadding(CGM, isPattern, STy, constant);
1075 if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
1076 llvm::SmallVector<llvm::Constant *, 8> Values;
1077 uint64_t Size = ArrayTy->getNumElements();
1078 if (!Size)
1079 return constant;
1080 llvm::Type *ElemTy = ArrayTy->getElementType();
1081 bool ZeroInitializer = constant->isNullValue();
1082 llvm::Constant *OpValue, *PaddedOp;
1083 if (ZeroInitializer) {
1084 OpValue = llvm::Constant::getNullValue(ElemTy);
1085 PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1086 }
1087 for (unsigned Op = 0; Op != Size; ++Op) {
1088 if (!ZeroInitializer) {
1089 OpValue = constant->getAggregateElement(Op);
1090 PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1091 }
1092 Values.push_back(PaddedOp);
1093 }
1094 auto *NewElemTy = Values[0]->getType();
1095 if (NewElemTy == ElemTy)
1096 return constant;
1097 auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
1098 return llvm::ConstantArray::get(NewArrayTy, Values);
1099 }
1100 // FIXME: Add handling for tail padding in vectors. Vectors don't
1101 // have padding between or inside elements, but the total amount of
1102 // data can be less than the allocated size.
1103 return constant;
1104}
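// Illustrative example (hypothetical user code): for
//   struct S { char c; int i; };
// common targets leave three padding bytes after `c`; constWithPadding
// materializes them as an explicit [3 x i8] member filled with zeroes, or with
// the target's repeated pattern byte when pattern initialization is requested.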
1105
1107 llvm::Constant *Constant,
1108 CharUnits Align) {
1109 auto FunctionName = [&](const DeclContext *DC) -> std::string {
1110 if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
1111 if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
1112 return CC->getNameAsString();
1113 if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
1114 return CD->getNameAsString();
1115 return std::string(getMangledName(FD));
1116 } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
1117 return OM->getNameAsString();
1118 } else if (isa<BlockDecl>(DC)) {
1119 return "<block>";
1120 } else if (isa<CapturedDecl>(DC)) {
1121 return "<captured>";
1122 } else {
1123 llvm_unreachable("expected a function or method");
1124 }
1125 };
1126
1127 // Form a simple per-variable cache of these values in case we find we
1128 // want to reuse them.
1129 llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
1130 if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
1131 auto *Ty = Constant->getType();
1132 bool isConstant = true;
1133 llvm::GlobalVariable *InsertBefore = nullptr;
1134 unsigned AS =
1135 getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
1136 std::string Name;
1137 if (D.hasGlobalStorage())
1138 Name = getMangledName(&D).str() + ".const";
1139 else if (const DeclContext *DC = D.getParentFunctionOrMethod())
1140 Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
1141 else
1142 llvm_unreachable("local variable has no parent function or method");
1143 llvm::GlobalVariable *GV = new llvm::GlobalVariable(
1144 getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
1145 Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
1146 GV->setAlignment(Align.getAsAlign());
1147 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1148 CacheEntry = GV;
1149 } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
1150 CacheEntry->setAlignment(Align.getAsAlign());
1151 }
1152
1153 return Address(CacheEntry, CacheEntry->getValueType(), Align);
1154}
1155
1156static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
1157 const VarDecl &D,
1158 CGBuilderTy &Builder,
1159 llvm::Constant *Constant,
1160 CharUnits Align) {
1161 Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
1162 return SrcPtr.withElementType(CGM.Int8Ty);
1163}
1164
1165static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
1166 Address Loc, bool isVolatile,
1167 CGBuilderTy &Builder,
1168 llvm::Constant *constant, bool IsAutoInit) {
1169 auto *Ty = constant->getType();
1170 uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
1171 if (!ConstantSize)
1172 return;
1173
1174 bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
1175 Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
1176 if (canDoSingleStore) {
1177 auto *I = Builder.CreateStore(constant, Loc, isVolatile);
1178 if (IsAutoInit)
1179 I->addAnnotationMetadata("auto-init");
1180 return;
1181 }
1182
1183 auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);
1184
1185 // If the initializer is all or mostly the same, codegen with bzero / memset
1186 // then do a few stores afterward.
1187 if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
1188 auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
1189 SizeVal, isVolatile);
1190 if (IsAutoInit)
1191 I->addAnnotationMetadata("auto-init");
1192
1193 bool valueAlreadyCorrect =
1194 constant->isNullValue() || isa<llvm::UndefValue>(constant);
1195 if (!valueAlreadyCorrect) {
1196 Loc = Loc.withElementType(Ty);
1197 emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
1198 IsAutoInit);
1199 }
1200 return;
1201 }
1202
1203 // If the initializer is a repeated byte pattern, use memset.
1204 llvm::Value *Pattern =
1205 shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
1206 if (Pattern) {
1207 uint64_t Value = 0x00;
1208 if (!isa<llvm::UndefValue>(Pattern)) {
1209 const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
1210 assert(AP.getBitWidth() <= 8);
1211 Value = AP.getLimitedValue();
1212 }
1213 auto *I = Builder.CreateMemSet(
1214 Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
1215 if (IsAutoInit)
1216 I->addAnnotationMetadata("auto-init");
1217 return;
1218 }
1219
1220 // If the initializer is small or trivialAutoVarInit is set, use a handful of
1221 // stores.
1222 bool IsTrivialAutoVarInitPattern =
1223 CGM.getContext().getLangOpts().getTrivialAutoVarInit() ==
1224 LangOptions::TrivialAutoVarInitKind::Pattern;
1225 if (shouldSplitConstantStore(CGM, ConstantSize)) {
1226 if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
1227 if (STy == Loc.getElementType() ||
1228 (STy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
1229 const llvm::StructLayout *Layout =
1230 CGM.getDataLayout().getStructLayout(STy);
1231 for (unsigned i = 0; i != constant->getNumOperands(); i++) {
1232 CharUnits CurOff =
1233 CharUnits::fromQuantity(Layout->getElementOffset(i));
1234 Address EltPtr = Builder.CreateConstInBoundsByteGEP(
1235 Loc.withElementType(CGM.Int8Ty), CurOff);
1236 emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
1237 constant->getAggregateElement(i), IsAutoInit);
1238 }
1239 return;
1240 }
1241 } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
1242 if (ATy == Loc.getElementType() ||
1243 (ATy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
1244 for (unsigned i = 0; i != ATy->getNumElements(); i++) {
1245 Address EltPtr = Builder.CreateConstGEP(
1246 Loc.withElementType(ATy->getElementType()), i);
1247 emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
1248 constant->getAggregateElement(i), IsAutoInit);
1249 }
1250 return;
1251 }
1252 }
1253 }
1254
1255 // Copy from a global.
1256 auto *I =
1257 Builder.CreateMemCpy(Loc,
1258 createUnnamedGlobalForMemcpyFrom(
1259 CGM, D, Builder, constant, Loc.getAlignment()),
1260 SizeVal, isVolatile);
1261 if (IsAutoInit)
1262 I->addAnnotationMetadata("auto-init");
1263}
1264
1265static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
1266 Address Loc, bool isVolatile,
1267 CGBuilderTy &Builder) {
1268 llvm::Type *ElTy = Loc.getElementType();
1269 llvm::Constant *constant =
1270 constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
1271 emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
1272 /*IsAutoInit=*/true);
1273}
1274
1275static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
1276 Address Loc, bool isVolatile,
1277 CGBuilderTy &Builder) {
1278 llvm::Type *ElTy = Loc.getElementType();
1279 llvm::Constant *constant = constWithPadding(
1280 CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1281 assert(!isa<llvm::UndefValue>(constant));
1282 emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
1283 /*IsAutoInit=*/true);
1284}
1285
1286static bool containsUndef(llvm::Constant *constant) {
1287 auto *Ty = constant->getType();
1288 if (isa<llvm::UndefValue>(constant))
1289 return true;
1290 if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
1291 for (llvm::Use &Op : constant->operands())
1292 if (containsUndef(cast<llvm::Constant>(Op)))
1293 return true;
1294 return false;
1295}
1296
1297static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
1298 llvm::Constant *constant) {
1299 auto *Ty = constant->getType();
1300 if (isa<llvm::UndefValue>(constant))
1301 return patternOrZeroFor(CGM, isPattern, Ty);
1302 if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
1303 return constant;
1304 if (!containsUndef(constant))
1305 return constant;
1306 llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
1307 for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
1308 auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
1309 Values[Op] = replaceUndef(CGM, isPattern, OpValue);
1310 }
1311 if (Ty->isStructTy())
1312 return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
1313 if (Ty->isArrayTy())
1314 return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
1315 assert(Ty->isVectorTy());
1316 return llvm::ConstantVector::get(Values);
1317}
1318
1319/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
1320/// variable declaration with auto, register, or no storage class specifier.
1321/// These turn into simple stack objects, or GlobalValues depending on target.
1322void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
1323 AutoVarEmission emission = EmitAutoVarAlloca(D);
1324 EmitAutoVarInit(emission);
1325 EmitAutoVarCleanups(emission);
1326}
1327
1328/// Emit a lifetime.begin marker if some criteria are satisfied.
1329/// \return a pointer to the temporary size Value if a marker was emitted, null
1330/// otherwise
1331llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
1332 llvm::Value *Addr) {
1333 if (!ShouldEmitLifetimeMarkers)
1334 return nullptr;
1335
1336 assert(Addr->getType()->getPointerAddressSpace() ==
1337 CGM.getDataLayout().getAllocaAddrSpace() &&
1338 "Pointer should be in alloca address space");
1339 llvm::Value *SizeV = llvm::ConstantInt::get(
1340 Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
1341 llvm::CallInst *C =
1342 Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
1343 C->setDoesNotThrow();
1344 return SizeV;
1345}
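// Illustrative example: for a fixed-size 4-byte alloca %x this emits roughly
//   call void @llvm.lifetime.start.p0(i64 4, ptr %x)
// and EmitLifetimeEnd() below emits the matching @llvm.lifetime.end.p0 call;
// scalable sizes are encoded as -1.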
1346
1347void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
1348 assert(Addr->getType()->getPointerAddressSpace() ==
1349 CGM.getDataLayout().getAllocaAddrSpace() &&
1350 "Pointer should be in alloca address space");
1351 llvm::CallInst *C =
1352 Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
1353 C->setDoesNotThrow();
1354}
1355
1356void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
1357 CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
1358 // For each dimension stores its QualType and corresponding
1359 // size-expression Value.
1360 SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
1361 SmallVector<const IdentifierInfo *, 4> VLAExprNames;
1362
1363 // Break down the array into individual dimensions.
1364 QualType Type1D = D.getType();
1365 while (getContext().getAsVariableArrayType(Type1D)) {
1366 auto VlaSize = getVLAElements1D(Type1D);
1367 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1368 Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
1369 else {
1370 // Generate a locally unique name for the size expression.
1371 Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
1372 SmallString<12> Buffer;
1373 StringRef NameRef = Name.toStringRef(Buffer);
1374 auto &Ident = getContext().Idents.getOwn(NameRef);
1375 VLAExprNames.push_back(&Ident);
1376 auto SizeExprAddr =
1377 CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
1378 Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
1379 Dimensions.emplace_back(SizeExprAddr.getPointer(),
1380 Type1D.getUnqualifiedType());
1381 }
1382 Type1D = VlaSize.Type;
1383 }
1384
1385 if (!EmitDebugInfo)
1386 return;
1387
1388 // Register each dimension's size-expression with a DILocalVariable,
1389 // so that it can be used by CGDebugInfo when instantiating a DISubrange
1390 // to describe this array.
1391 unsigned NameIdx = 0;
1392 for (auto &VlaSize : Dimensions) {
1393 llvm::Metadata *MD;
1394 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1395 MD = llvm::ConstantAsMetadata::get(C);
1396 else {
1397 // Create an artificial VarDecl to generate debug info for.
1398 const IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
1399 auto QT = getContext().getIntTypeForBitwidth(
1400 SizeTy->getScalarSizeInBits(), false);
1401 auto *ArtificialDecl = VarDecl::Create(
1402 getContext(), const_cast<DeclContext *>(D.getDeclContext()),
1403 D.getLocation(), D.getLocation(), NameIdent, QT,
1404 getContext().CreateTypeSourceInfo(QT), SC_Auto);
1405 ArtificialDecl->setImplicit();
1406
1407 MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
1408 Builder);
1409 }
1410 assert(MD && "No Size expression debug node created");
1411 DI->registerVLASizeExpression(VlaSize.Type, MD);
1412 }
1413}
1414
1415/// EmitAutoVarAlloca - Emit the alloca and debug information for a
1416/// local variable. Does not emit initialization or destruction.
1417CodeGenFunction::AutoVarEmission
1418CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1419 QualType Ty = D.getType();
1420 assert(
1421 Ty.getAddressSpace() == LangAS::Default ||
1422 (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1423
1424 AutoVarEmission emission(D);
1425
1426 bool isEscapingByRef = D.isEscapingByref();
1427 emission.IsEscapingByRef = isEscapingByRef;
1428
1429 CharUnits alignment = getContext().getDeclAlign(&D);
1430
1431 // If the type is variably-modified, emit all the VLA sizes for it.
1432 if (Ty->isVariablyModifiedType())
1433 EmitVariablyModifiedType(Ty);
1434
1435 auto *DI = getDebugInfo();
1436 bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
1437
1438 Address address = Address::invalid();
1439 RawAddress AllocaAddr = RawAddress::invalid();
1440 Address OpenMPLocalAddr = Address::invalid();
1441 if (CGM.getLangOpts().OpenMPIRBuilder)
1442 OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
1443 else
1444 OpenMPLocalAddr =
1445 getLangOpts().OpenMP
1446 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
1447 : Address::invalid();
1448
1449 bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1450
1451 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1452 address = OpenMPLocalAddr;
1453 AllocaAddr = OpenMPLocalAddr;
1454 } else if (Ty->isConstantSizeType()) {
1455 // If this value is an array or struct with a statically determinable
1456 // constant initializer, there are optimizations we can do.
1457 //
1458 // TODO: We should constant-evaluate the initializer of any variable,
1459 // as long as it is initialized by a constant expression. Currently,
1460 // isConstantInitializer produces wrong answers for structs with
1461 // reference or bitfield members, and a few other cases, and checking
1462 // for POD-ness protects us from some of these.
1463 if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1464 (D.isConstexpr() ||
1465 ((Ty.isPODType(getContext()) ||
1466 getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
1467 D.getInit()->isConstantInitializer(getContext(), false)))) {
1468
1469 // If the variable's a const type, and it's neither an NRVO
1470 // candidate nor a __block variable and has no mutable members,
1471 // emit it as a global instead.
1472 // Exception is if a variable is located in non-constant address space
1473 // in OpenCL.
1474 bool NeedsDtor =
1475 D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
1476 if ((!getLangOpts().OpenCL ||
1477 Ty.getAddressSpace() == LangAS::opencl_constant) &&
1478 (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1479 !isEscapingByRef &&
1480 Ty.isConstantStorage(getContext(), true, !NeedsDtor))) {
1481 EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
1482
1483 // Signal this condition to later callbacks.
1484 emission.Addr = Address::invalid();
1485 assert(emission.wasEmittedAsGlobal());
1486 return emission;
1487 }
1488
1489 // Otherwise, tell the initialization code that we're in this case.
1490 emission.IsConstantAggregate = true;
1491 }
1492
1493 // A normal fixed sized variable becomes an alloca in the entry block,
1494 // unless:
1495 // - it's an NRVO variable.
1496 // - we are compiling OpenMP and it's an OpenMP local variable.
1497 if (NRVO) {
1498 // The named return value optimization: allocate this variable in the
1499 // return slot, so that we can elide the copy when returning this
1500 // variable (C++0x [class.copy]p34).
1501 address = ReturnValue;
1502 AllocaAddr =
1503 RawAddress(ReturnValue.emitRawPointer(*this),
1504 ReturnValue.getElementType(), ReturnValue.getAlignment());
1505 ;
1506
1506
1507 if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1508 const auto *RD = RecordTy->getDecl();
1509 const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
1510 if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
1511 RD->isNonTrivialToPrimitiveDestroy()) {
1512 // Create a flag that is used to indicate when the NRVO was applied
1513 // to this variable. Set it to zero to indicate that NRVO was not
1514 // applied.
1515 llvm::Value *Zero = Builder.getFalse();
1516 RawAddress NRVOFlag =
1517 CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
1518 EnsureInsertPoint();
1519 Builder.CreateStore(Zero, NRVOFlag);
1520
1521 // Record the NRVO flag for this variable.
1522 NRVOFlags[&D] = NRVOFlag.getPointer();
1523 emission.NRVOFlag = NRVOFlag.getPointer();
1524 }
1525 }
1526 } else {
1527 CharUnits allocaAlignment;
1528 llvm::Type *allocaTy;
1529 if (isEscapingByRef) {
1530 auto &byrefInfo = getBlockByrefInfo(&D);
1531 allocaTy = byrefInfo.Type;
1532 allocaAlignment = byrefInfo.ByrefAlignment;
1533 } else {
1534 allocaTy = ConvertTypeForMem(Ty);
1535 allocaAlignment = alignment;
1536 }
1537
1538 // Create the alloca. Note that we set the name separately from
1539 // building the instruction so that it's there even in no-asserts
1540 // builds.
1541 address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
1542 /*ArraySize=*/nullptr, &AllocaAddr);
1543
1544 // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
1545 // the catch parameter starts in the catchpad instruction, and we can't
1546 // insert code in those basic blocks.
1547 bool IsMSCatchParam =
1548 D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1549
1550 // Emit a lifetime intrinsic if meaningful. There's no point in doing this
1551 // if we don't have a valid insertion point (?).
1552 if (HaveInsertPoint() && !IsMSCatchParam) {
1553 // If there's a jump into the lifetime of this variable, its lifetime
1554 // gets broken up into several regions in IR, which requires more work
1555 // to handle correctly. For now, just omit the intrinsics; this is a
1556 // rare case, and it's better to just be conservatively correct.
1557 // PR28267.
1558 //
1559 // We have to do this in all language modes if there's a jump past the
1560 // declaration. We also have to do it in C if there's a jump to an
1561 // earlier point in the current block because non-VLA lifetimes begin as
1562 // soon as the containing block is entered, not when its variables
1563 // actually come into scope; suppressing the lifetime annotations
1564 // completely in this case is unnecessarily pessimistic, but again, this
1565 // is rare.
1566 if (!Bypasses.IsBypassed(&D) &&
1567 !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1568 llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
1569 emission.SizeForLifetimeMarkers =
1570 EmitLifetimeStart(Size, AllocaAddr.getPointer());
1571 }
1572 } else {
1573 assert(!emission.useLifetimeMarkers());
1574 }
1575 }
1576 } else {
1577 EnsureInsertPoint();
1578
1579 // Delayed globalization for variable length declarations. This ensures that
1580 // the expression representing the length has been emitted and can be used
1581 // by the definition of the VLA. Since this is an escaped declaration, in
1582 // OpenMP we have to use a call to __kmpc_alloc_shared(). The matching
1583 // deallocation call to __kmpc_free_shared() is emitted later.
1584 bool VarAllocated = false;
1585 if (getLangOpts().OpenMPIsTargetDevice) {
1586 auto &RT = CGM.getOpenMPRuntime();
1587 if (RT.isDelayedVariableLengthDecl(*this, &D)) {
1588 // Emit call to __kmpc_alloc_shared() instead of the alloca.
1589 std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
1590 RT.getKmpcAllocShared(*this, &D);
1591
1592 // Save the address of the allocation:
1593 LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
1594 CGM.getContext().getDeclAlign(&D),
1595 AlignmentSource::Decl);
1596 address = Base.getAddress();
1597
1598 // Push a cleanup block to emit the call to __kmpc_free_shared in the
1599 // appropriate location at the end of the scope of the
1600 // __kmpc_alloc_shared functions:
1601 pushKmpcAllocFree(NormalCleanup, AddrSizePair);
1602
1603 // Mark variable as allocated:
1604 VarAllocated = true;
1605 }
1606 }
1607
1608 if (!VarAllocated) {
1609 if (!DidCallStackSave) {
1610 // Save the stack.
1611 Address Stack =
1612 CreateDefaultAlignTempAlloca(AllocaInt8PtrTy, "saved_stack");
1613
1614 llvm::Value *V = Builder.CreateStackSave();
1615 assert(V->getType() == AllocaInt8PtrTy);
1616 Builder.CreateStore(V, Stack);
1617
1618 DidCallStackSave = true;
1619
1620 // Push a cleanup block and restore the stack there.
1621 // FIXME: in general circumstances, this should be an EH cleanup.
1622 pushStackRestore(NormalCleanup, Stack);
1623 }
1624
1625 auto VlaSize = getVLASize(Ty);
1626 llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
1627
1628 // Allocate memory for the array.
1629 address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
1630 &AllocaAddr);
1631 }
1632
1633 // If we have debug info enabled, properly describe the VLA dimensions for
1634 // this type by registering the vla size expression for each of the
1635 // dimensions.
1636 EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
1637 }
1638
1639 setAddrOfLocalVar(&D, address);
1640 emission.Addr = address;
1641 emission.AllocaAddr = AllocaAddr;
1642
1643 // Emit debug info for local var declaration.
1644 if (EmitDebugInfo && HaveInsertPoint()) {
1645 Address DebugAddr = address;
1646 bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1647 DI->setLocation(D.getLocation());
1648
1649 // If NRVO, use a pointer to the return address.
1650 if (UsePointerValue) {
1651 DebugAddr = ReturnValuePointer;
1652 AllocaAddr = ReturnValuePointer;
1653 }
1654 (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder,
1655 UsePointerValue);
1656 }
1657
1658 if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1659 EmitVarAnnotations(&D, address.emitRawPointer(*this));
1660
1661 // Make sure we call @llvm.lifetime.end.
1662 if (emission.useLifetimeMarkers())
1663 EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
1664 emission.getOriginalAllocatedAddress(),
1665 emission.getSizeForLifetimeMarkers());
1666
1667 return emission;
1668}
1669
1670static bool isCapturedBy(const VarDecl &, const Expr *);
1671
1672/// Determines whether the given __block variable is potentially
1673/// captured by the given statement.
1674static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
1675 if (const Expr *E = dyn_cast<Expr>(S))
1676 return isCapturedBy(Var, E);
1677 for (const Stmt *SubStmt : S->children())
1678 if (isCapturedBy(Var, SubStmt))
1679 return true;
1680 return false;
1681}
1682
1683/// Determines whether the given __block variable is potentially
1684/// captured by the given expression.
1685static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
1686 // Skip the most common kinds of expressions that make
1687 // hierarchy-walking expensive.
1688 E = E->IgnoreParenCasts();
1689
1690 if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
1691 const BlockDecl *Block = BE->getBlockDecl();
1692 for (const auto &I : Block->captures()) {
1693 if (I.getVariable() == &Var)
1694 return true;
1695 }
1696
1697 // No need to walk into the subexpressions.
1698 return false;
1699 }
1700
1701 if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
1702 const CompoundStmt *CS = SE->getSubStmt();
1703 for (const auto *BI : CS->body())
1704 if (const auto *BIE = dyn_cast<Expr>(BI)) {
1705 if (isCapturedBy(Var, BIE))
1706 return true;
1707 }
1708 else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
1709 // special case declarations
1710 for (const auto *I : DS->decls()) {
1711 if (const auto *VD = dyn_cast<VarDecl>((I))) {
1712 const Expr *Init = VD->getInit();
1713 if (Init && isCapturedBy(Var, Init))
1714 return true;
1715 }
1716 }
1717 }
1718 else
1719 // FIXME: Conservatively assume that arbitrary statements cause capturing.
1720 // Later, provide code to poke into statements for capture analysis.
1721 return true;
1722 return false;
1723 }
1724
1725 for (const Stmt *SubStmt : E->children())
1726 if (isCapturedBy(Var, SubStmt))
1727 return true;
1728
1729 return false;
1730}
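// Illustrative sketch (not from CGDecl.cpp itself): the case this analysis
// guards against is a __block variable whose own initializer captures it,
// e.g. with -fblocks:
//
//   __block int x = ^{ return x; }();   // the initializer's block captures 'x'
//
// EmitAutoVarInit below must then emit the initializer first and only
// afterwards copy the result into the (possibly already forwarded) byref
// object.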
1731
1732/// Determine whether the given initializer is trivial in the sense
1733/// that it requires no code to be generated.
1734static bool isTrivialInitializer(const Expr *Init) {
1735 if (!Init)
1736 return true;
1737
1738 if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
1739 if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1740 if (Constructor->isTrivial() &&
1741 Constructor->isDefaultConstructor() &&
1742 !Construct->requiresZeroInitialization())
1743 return true;
1744
1745 return false;
1746}
1747
1748void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
1749 const VarDecl &D,
1750 Address Loc) {
1751 auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
1752 auto trivialAutoVarInitMaxSize =
1753 getContext().getLangOpts().TrivialAutoVarInitMaxSize;
1754 CharUnits Size = getContext().getTypeSizeInChars(type);
1755 bool isVolatile = type.isVolatileQualified();
1756 if (!Size.isZero()) {
1757 // We skip auto-init variables by their alloc size. Take this as an example:
1758 // "struct Foo {int x; char buff[1024];}" Assume the max-size flag is 1023.
1759 // All Foo type variables will be skipped. Ideally, we only skip the buff
1760 // array and still auto-init x in this example.
1761 // TODO: Improve the size filtering to filter by member size.
1762 auto allocSize = CGM.getDataLayout().getTypeAllocSize(Loc.getElementType());
1763 switch (trivialAutoVarInit) {
1764 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1765 llvm_unreachable("Uninitialized handled by caller");
1766 case LangOptions::TrivialAutoVarInitKind::Zero:
1767 if (CGM.stopAutoInit())
1768 return;
1769 if (trivialAutoVarInitMaxSize > 0 &&
1770 allocSize > trivialAutoVarInitMaxSize)
1771 return;
1772 emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
1773 break;
1774 case LangOptions::TrivialAutoVarInitKind::Pattern:
1775 if (CGM.stopAutoInit())
1776 return;
1777 if (trivialAutoVarInitMaxSize > 0 &&
1778 allocSize > trivialAutoVarInitMaxSize)
1779 return;
1780 emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
1781 break;
1782 }
1783 return;
1784 }
1785
1786 // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
1787 // them, so emit a memcpy with the VLA size to initialize each element.
1788 // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
1789 // will catch that code, but there exists code which generates zero-sized
1790 // VLAs. Be nice and initialize whatever they requested.
1791 const auto *VlaType = getContext().getAsVariableArrayType(type);
1792 if (!VlaType)
1793 return;
1794 auto VlaSize = getVLASize(VlaType);
1795 auto SizeVal = VlaSize.NumElts;
1796 CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1797 switch (trivialAutoVarInit) {
1798 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1799 llvm_unreachable("Uninitialized handled by caller");
1800
1801 case LangOptions::TrivialAutoVarInitKind::Zero: {
1802 if (CGM.stopAutoInit())
1803 return;
1804 if (!EltSize.isOne())
1805 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1806 auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
1807 SizeVal, isVolatile);
1808 I->addAnnotationMetadata("auto-init");
1809 break;
1810 }
1811
1812 case LangOptions::TrivialAutoVarInitKind::Pattern: {
1813 if (CGM.stopAutoInit())
1814 return;
1815 llvm::Type *ElTy = Loc.getElementType();
1816 llvm::Constant *Constant = constWithPadding(
1817 CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1818 CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
1819 llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
1820 llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
1821 llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
1822 llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
1823 SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
1824 "vla.iszerosized");
1825 Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
1826 EmitBlock(SetupBB);
1827 if (!EltSize.isOne())
1828 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1829 llvm::Value *BaseSizeInChars =
1830 llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
1831 Address Begin = Loc.withElementType(Int8Ty);
1832 llvm::Value *End = Builder.CreateInBoundsGEP(Begin.getElementType(),
1833 Begin.emitRawPointer(*this),
1834 SizeVal, "vla.end");
1835 llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
1836 EmitBlock(LoopBB);
1837 llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
1838 Cur->addIncoming(Begin.emitRawPointer(*this), OriginBB);
1839 CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
1840 auto *I =
1841 Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
1842 createUnnamedGlobalForMemcpyFrom(
1843 CGM, D, Builder, Constant, ConstantAlign),
1844 BaseSizeInChars, isVolatile);
1845 I->addAnnotationMetadata("auto-init");
1846 llvm::Value *Next =
1847 Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
1848 llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
1849 Builder.CreateCondBr(Done, ContBB, LoopBB);
1850 Cur->addIncoming(Next, LoopBB);
1851 EmitBlock(ContBB);
1852 } break;
1853 }
1854}
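// Illustrative sketch (not from CGDecl.cpp itself): the zero/pattern paths
// above are selected by -ftrivial-auto-var-init=. Assuming a file test.c with
//
//   int f(void) { int x; return x; }
//
//   clang -ftrivial-auto-var-init=zero    -S -emit-llvm test.c   // zero-initializes 'x'
//   clang -ftrivial-auto-var-init=pattern -S -emit-llvm test.c   // fills 'x' with a byte pattern
//
// the emitted stores/memset/memcpy carry "auto-init" annotation metadata, and
// -ftrivial-auto-var-init-max-size=<N> suppresses the initialization for
// variables whose alloc size exceeds N bytes.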
1855
1856void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
1857 assert(emission.Variable && "emission was not valid!");
1858
1859 // If this was emitted as a global constant, we're done.
1860 if (emission.wasEmittedAsGlobal()) return;
1861
1862 const VarDecl &D = *emission.Variable;
1864 QualType type = D.getType();
1865
1866 // If this local has an initializer, emit it now.
1867 const Expr *Init = D.getInit();
1868
1869 // If we are at an unreachable point, we don't need to emit the initializer
1870 // unless it contains a label.
1871 if (!HaveInsertPoint()) {
1872 if (!Init || !ContainsLabel(Init)) return;
1873 EnsureInsertPoint();
1874 }
1875
1876 // Initialize the structure of a __block variable.
1877 if (emission.IsEscapingByRef)
1878 emitByrefStructureInit(emission);
1879
1880 // Initialize the variable here if it doesn't have an initializer and it is a
1881 // C struct that is non-trivial to initialize or an array containing such a
1882 // struct.
1883 if (!Init &&
1884 type.isNonTrivialToPrimitiveDefaultInitialize() ==
1885 QualType::PDIK_Struct) {
1886 LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
1887 if (emission.IsEscapingByRef)
1888 drillIntoBlockVariable(*this, Dst, &D);
1889 defaultInitNonTrivialCStructVar(Dst);
1890 return;
1891 }
1892
1893 // Check whether this is a byref variable that's potentially
1894 // captured and moved by its own initializer. If so, we'll need to
1895 // emit the initializer first, then copy into the variable.
1896 bool capturedByInit =
1897 Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
1898
1899 bool locIsByrefHeader = !capturedByInit;
1900 const Address Loc =
1901 locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
1902
1903 // Note: constexpr already initializes everything correctly.
1904 LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
1905 (D.isConstexpr()
1906 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1907 : (D.getAttr<UninitializedAttr>()
1908 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1909 : getContext().getLangOpts().getTrivialAutoVarInit()));
1910
1911 auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
1912 if (trivialAutoVarInit ==
1913 LangOptions::TrivialAutoVarInitKind::Uninitialized)
1914 return;
1915
1916 // Only initialize a __block's storage: we always initialize the header.
1917 if (emission.IsEscapingByRef && !locIsByrefHeader)
1918 Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);
1919
1920 return emitZeroOrPatternForAutoVarInit(type, D, Loc);
1921 };
1922
1923 if (isTrivialInitializer(Init))
1924 return initializeWhatIsTechnicallyUninitialized(Loc);
1925
1926 llvm::Constant *constant = nullptr;
1927 if (emission.IsConstantAggregate ||
1928 D.mightBeUsableInConstantExpressions(getContext())) {
1929 assert(!capturedByInit && "constant init contains a capturing block?");
1930 constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
1931 if (constant && !constant->isZeroValue() &&
1932 (trivialAutoVarInit !=
1933 LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
1934 IsPattern isPattern =
1935 (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
1936 ? IsPattern::Yes
1937 : IsPattern::No;
1938 // C guarantees that brace-init with fewer initializers than members in
1939 // the aggregate will initialize the rest of the aggregate as-if it were
1940 // static initialization. In turn static initialization guarantees that
1941 // padding is initialized to zero bits. We could instead pattern-init if D
1942 // has any ImplicitValueInitExpr, but that seems to be unintuitive
1943 // behavior.
1944 constant = constWithPadding(CGM, IsPattern::No,
1945 replaceUndef(CGM, isPattern, constant));
1946 }
1947
1948 if (D.getType()->isBitIntType() &&
1949 CGM.getTypes().typeRequiresSplitIntoByteArray(D.getType())) {
1950 // Constants for long _BitInt types are split into individual bytes.
1951 // Try to fold these back into an integer constant so it can be stored
1952 // properly.
1953 llvm::Type *LoadType = CGM.getTypes().convertTypeForLoadStore(
1954 D.getType(), constant->getType());
1955 constant = llvm::ConstantFoldLoadFromConst(
1956 constant, LoadType, llvm::APInt::getZero(32), CGM.getDataLayout());
1957 }
1958 }
1959
1960 if (!constant) {
1961 if (trivialAutoVarInit !=
1962 LangOptions::TrivialAutoVarInitKind::Uninitialized) {
1963 // At this point, we know D has an Init expression, but isn't a constant.
1964 // - If D is not a scalar, auto-var-init conservatively (members may be
1965 // left uninitialized by constructor Init expressions for example).
1966 // - If D is a scalar, we only need to auto-var-init if there is a
1967 // self-reference. Otherwise, the Init expression should be sufficient.
1968 // It may be that the Init expression uses other uninitialized memory,
1969 // but auto-var-init here would not help, as auto-init would get
1970 // overwritten by Init.
1971 if (!D.getType()->isScalarType() || capturedByInit ||
1972 isAccessedBy(D, Init)) {
1973 initializeWhatIsTechnicallyUninitialized(Loc);
1974 }
1975 }
1976 LValue lv = MakeAddrLValue(Loc, type);
1977 lv.setNonGC(true);
1978 return EmitExprAsInit(Init, &D, lv, capturedByInit);
1979 }
1980
1981 if (!emission.IsConstantAggregate) {
1982 // For simple scalar/complex initialization, store the value directly.
1983 LValue lv = MakeAddrLValue(Loc, type);
1984 lv.setNonGC(true);
1985 return EmitStoreThroughLValue(RValue::get(constant), lv, true);
1986 }
1987
1988 emitStoresForConstant(CGM, D, Loc.withElementType(CGM.Int8Ty),
1989 type.isVolatileQualified(), Builder, constant,
1990 /*IsAutoInit=*/false);
1991}
1992
1993/// Emit an expression as an initializer for an object (variable, field, etc.)
1994/// at the given location. The expression is not necessarily the normal
1995/// initializer for the object, and the address is not necessarily
1996/// its normal location.
1997///
1998/// \param init the initializing expression
1999/// \param D the object to act as if we're initializing
2000/// \param lvalue the lvalue to initialize
2001/// \param capturedByInit true if \p D is a __block variable
2002/// whose address is potentially changed by the initializer
2003void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
2004 LValue lvalue, bool capturedByInit) {
2005 QualType type = D->getType();
2006
2007 if (type->isReferenceType()) {
2008 RValue rvalue = EmitReferenceBindingToExpr(init);
2009 if (capturedByInit)
2010 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
2011 EmitStoreThroughLValue(rvalue, lvalue, true);
2012 return;
2013 }
2014 switch (getEvaluationKind(type)) {
2015 case TEK_Scalar:
2016 EmitScalarInit(init, D, lvalue, capturedByInit);
2017 return;
2018 case TEK_Complex: {
2019 ComplexPairTy complex = EmitComplexExpr(init);
2020 if (capturedByInit)
2021 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
2022 EmitStoreOfComplex(complex, lvalue, /*init*/ true);
2023 return;
2024 }
2025 case TEK_Aggregate:
2026 if (type->isAtomicType()) {
2027 EmitAtomicInit(const_cast<Expr*>(init), lvalue);
2028 } else {
2029 AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
2030 if (isa<VarDecl>(D))
2031 Overlap = AggValueSlot::DoesNotOverlap;
2032 else if (auto *FD = dyn_cast<FieldDecl>(D))
2033 Overlap = getOverlapForFieldInit(FD);
2034 // TODO: how can we delay here if D is captured by its initializer?
2035 EmitAggExpr(init,
2036 AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed,
2037 AggValueSlot::DoesNotNeedGCBarriers,
2038 AggValueSlot::IsNotAliased, Overlap));
2039 }
2040 return;
2041 }
2042 llvm_unreachable("bad evaluation kind");
2043}
2044
2045/// Enter a destroy cleanup for the given local variable.
2046void CodeGenFunction::emitAutoVarTypeCleanup(
2047 const CodeGenFunction::AutoVarEmission &emission,
2048 QualType::DestructionKind dtorKind) {
2049 assert(dtorKind != QualType::DK_none);
2050
2051 // Note that for __block variables, we want to destroy the
2052 // original stack object, not the possibly forwarded object.
2053 Address addr = emission.getObjectAddress(*this);
2054
2055 const VarDecl *var = emission.Variable;
2056 QualType type = var->getType();
2057
2058 CleanupKind cleanupKind = NormalAndEHCleanup;
2059 CodeGenFunction::Destroyer *destroyer = nullptr;
2060
2061 switch (dtorKind) {
2062 case QualType::DK_none:
2063 llvm_unreachable("no cleanup for trivially-destructible variable");
2064
2065 case QualType::DK_cxx_destructor:
2066 // If there's an NRVO flag on the emission, we need a different
2067 // cleanup.
2068 if (emission.NRVOFlag) {
2069 assert(!type->isArrayType());
2070 CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
2071 EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
2072 emission.NRVOFlag);
2073 return;
2074 }
2075 break;
2076
2077 case QualType::DK_objc_strong_lifetime:
2078 // Suppress cleanups for pseudo-strong variables.
2079 if (var->isARCPseudoStrong()) return;
2080
2081 // Otherwise, consider whether to use an EH cleanup or not.
2082 cleanupKind = getARCCleanupKind();
2083
2084 // Use the imprecise destroyer by default.
2085 if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
2086 destroyer = CodeGenFunction::destroyARCStrongImprecise;
2087 break;
2088
2089 case QualType::DK_objc_weak_lifetime:
2090 break;
2091
2092 case QualType::DK_nontrivial_c_struct:
2093 destroyer = CodeGenFunction::destroyNonTrivialCStruct;
2094 if (emission.NRVOFlag) {
2095 assert(!type->isArrayType());
2096 EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
2097 emission.NRVOFlag, type);
2098 return;
2099 }
2100 break;
2101 }
2102
2103 // If we haven't chosen a more specific destroyer, use the default.
2104 if (!destroyer) destroyer = getDestroyer(dtorKind);
2105
2106 // Use an EH cleanup in array destructors iff the destructor itself
2107 // is being pushed as an EH cleanup.
2108 bool useEHCleanup = (cleanupKind & EHCleanup);
2109 EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
2110 useEHCleanup);
2111}
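// Illustrative sketch (hypothetical user code): the NRVO flag consulted above
// exists because a named return value may be destroyed on some paths but not
// on the path where it is returned in place:
//
//   struct S { ~S(); };
//   S make(bool fail) {
//     S s;               // may be constructed directly in the return slot
//     if (fail) throw 0; // here 's' must still be destroyed
//     return s;          // here the destructor cleanup must be skipped
//   }
//
// The "nrvo" flag is set when the NRVO'd return is taken, and
// DestroyNRVOVariableCXX tests it before running the destructor.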
2112
2113void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
2114 assert(emission.Variable && "emission was not valid!");
2115
2116 // If this was emitted as a global constant, we're done.
2117 if (emission.wasEmittedAsGlobal()) return;
2118
2119 // If we don't have an insertion point, we're done. Sema prevents
2120 // us from jumping into any of these scopes anyway.
2121 if (!HaveInsertPoint()) return;
2122
2123 const VarDecl &D = *emission.Variable;
2124
2125 // Check the type for a cleanup.
2126 if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
2127 emitAutoVarTypeCleanup(emission, dtorKind);
2128
2129 // In GC mode, honor objc_precise_lifetime.
2130 if (getLangOpts().getGC() != LangOptions::NonGC &&
2131 D.hasAttr<ObjCPreciseLifetimeAttr>()) {
2132 EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
2133 }
2134
2135 // Handle the cleanup attribute.
2136 if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
2137 const FunctionDecl *FD = CA->getFunctionDecl();
2138
2139 llvm::Constant *F = CGM.GetAddrOfFunction(FD);
2140 assert(F && "Could not find function!");
2141
2142 const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
2143 EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
2144 }
2145
2146 // If this is a block variable, call _Block_object_destroy
2147 // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
2148 // mode.
2149 if (emission.IsEscapingByRef &&
2150 CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2151 BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
2152 if (emission.Variable->getType().isObjCGCWeak())
2153 Flags |= BLOCK_FIELD_IS_WEAK;
2154 enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
2155 /*LoadBlockVarAddr*/ false,
2156 cxxDestructorCanThrow(emission.Variable->getType()));
2157 }
2158}
2159
2160CodeGenFunction::Destroyer *
2161CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
2162 switch (kind) {
2163 case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
2164 case QualType::DK_cxx_destructor:
2165 return destroyCXXObject;
2166 case QualType::DK_objc_strong_lifetime:
2167 return destroyARCStrongPrecise;
2168 case QualType::DK_objc_weak_lifetime:
2169 return destroyARCWeak;
2170 case QualType::DK_nontrivial_c_struct:
2171 return destroyNonTrivialCStruct;
2172 }
2173 llvm_unreachable("Unknown DestructionKind");
2174}
2175
2176/// pushEHDestroy - Push the standard destructor for the given type as
2177/// an EH-only cleanup.
2178void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
2179 Address addr, QualType type) {
2180 assert(dtorKind && "cannot push destructor for trivial type");
2181 assert(needsEHCleanup(dtorKind));
2182
2183 pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
2184}
2185
2186/// pushDestroy - Push the standard destructor for the given type as
2187/// at least a normal cleanup.
2188void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
2189 Address addr, QualType type) {
2190 assert(dtorKind && "cannot push destructor for trivial type");
2191
2192 CleanupKind cleanupKind = getCleanupKind(dtorKind);
2193 pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
2194 cleanupKind & EHCleanup);
2195}
2196
2197void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
2198 QualType type, Destroyer *destroyer,
2199 bool useEHCleanupForArray) {
2200 pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
2201 destroyer, useEHCleanupForArray);
2202}
2203
2204// Pushes a destroy and defers its deactivation until its
2205// CleanupDeactivationScope is exited.
2206void CodeGenFunction::pushDestroyAndDeferDeactivation(
2207 QualType::DestructionKind dtorKind, Address addr, QualType type) {
2208 assert(dtorKind && "cannot push destructor for trivial type");
2209
2210 CleanupKind cleanupKind = getCleanupKind(dtorKind);
2211 pushDestroyAndDeferDeactivation(
2212 cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup);
2213}
2214
2215void CodeGenFunction::pushDestroyAndDeferDeactivation(
2216 CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer,
2217 bool useEHCleanupForArray) {
2218 llvm::Instruction *DominatingIP =
2219 Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
2220 pushDestroy(cleanupKind, addr, type, destroyer, useEHCleanupForArray);
2221 DeferredDeactivationCleanupStack.push_back(
2222 {EHStack.stable_begin(), DominatingIP});
2223}
2224
2225void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
2226 EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
2227}
2228
2229void CodeGenFunction::pushKmpcAllocFree(
2230 CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
2231 EHStack.pushCleanup<KmpcAllocFree>(Kind, AddrSizePair);
2232}
2233
2234void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
2235 Address addr, QualType type,
2236 Destroyer *destroyer,
2237 bool useEHCleanupForArray) {
2238 // If we're not in a conditional branch, we don't need to bother generating a
2239 // conditional cleanup.
2240 if (!isInConditionalBranch()) {
2241 // FIXME: When popping normal cleanups, we need to keep this EH cleanup
2242 // around in case a temporary's destructor throws an exception.
2243
2244 // Add the cleanup to the EHStack. After the full-expr, this would be
2245 // deactivated before being popped from the stack.
2246 pushDestroyAndDeferDeactivation(cleanupKind, addr, type, destroyer,
2247 useEHCleanupForArray);
2248
2249 // Since this is lifetime-extended, push it once again to the EHStack after
2250 // the full expression.
2251 return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
2252 cleanupKind, Address::invalid(), addr, type, destroyer,
2253 useEHCleanupForArray);
2254 }
2255
2256 // Otherwise, we should only destroy the object if it's been initialized.
2257
2258 using ConditionalCleanupType =
2259 EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
2260 Destroyer *, bool>;
2261 DominatingValue<Address>::saved_type SavedAddr = saveValueInCond(addr);
2262
2263 // Remember to emit cleanup if we branch-out before end of full-expression
2264 // (eg: through stmt-expr or coro suspensions).
2265 AllocaTrackerRAII DeactivationAllocas(*this);
2266 Address ActiveFlagForDeactivation = createCleanupActiveFlag();
2267
2268 pushCleanupAndDeferDeactivation<ConditionalCleanupType>(
2269 cleanupKind, SavedAddr, type, destroyer, useEHCleanupForArray);
2270 initFullExprCleanupWithFlag(ActiveFlagForDeactivation);
2271 EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
2272 // Erase the active flag if the cleanup was not emitted.
2273 cleanup.AddAuxAllocas(std::move(DeactivationAllocas).Take());
2274
2275 // Since this is lifetime-extended, push it once again to the EHStack after
2276 // the full expression.
2277 // The previous active flag would always be 'false' due to forced deferred
2278 // deactivation. Use a separate flag for lifetime-extension to correctly
2279 // remember if this branch was taken and the object was initialized.
2280 Address ActiveFlagForLifetimeExt = createCleanupActiveFlag();
2281 pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
2282 cleanupKind, ActiveFlagForLifetimeExt, SavedAddr, type, destroyer,
2283 useEHCleanupForArray);
2284}
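// Illustrative sketch (hypothetical user code): a lifetime-extended destroy in
// a conditional branch arises from code like
//
//   struct T { ~T(); };
//   extern T a;
//   bool cond();
//   void g() {
//     const T &r = cond() ? a : T{};  // temporary exists only on one branch
//   }                                 // ...but lives until the end of g()
//
// which is why two active flags are used above: one so the full-expression
// cleanup can be deferred and deactivated, and a separate one so the scope-end
// cleanup remembers whether this branch actually created the object.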
2285
2286/// emitDestroy - Immediately perform the destruction of the given
2287/// object.
2288///
2289/// \param addr - the address of the object; a type*
2290/// \param type - the type of the object; if an array type, all
2291/// objects are destroyed in reverse order
2292/// \param destroyer - the function to call to destroy individual
2293/// elements
2294/// \param useEHCleanupForArray - whether an EH cleanup should be
2295/// used when destroying array elements, in case one of the
2296/// destructions throws an exception
2297void CodeGenFunction::emitDestroy(Address addr, QualType type,
2298 Destroyer *destroyer,
2299 bool useEHCleanupForArray) {
2300 const ArrayType *arrayType = getContext().getAsArrayType(type);
2301 if (!arrayType)
2302 return destroyer(*this, addr, type);
2303
2304 llvm::Value *length = emitArrayLength(arrayType, type, addr);
2305
2306 CharUnits elementAlign =
2307 addr.getAlignment()
2308 .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
2309
2310 // Normally we have to check whether the array is zero-length.
2311 bool checkZeroLength = true;
2312
2313 // But if the array length is constant, we can suppress that.
2314 if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
2315 // ...and if it's constant zero, we can just skip the entire thing.
2316 if (constLength->isZero()) return;
2317 checkZeroLength = false;
2318 }
2319
2320 llvm::Value *begin = addr.emitRawPointer(*this);
2321 llvm::Value *end =
2322 Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
2323 emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2324 checkZeroLength, useEHCleanupForArray);
2325}
2326
2327/// emitArrayDestroy - Destroys all the elements of the given array,
2328/// beginning from last to first. The array cannot be zero-length.
2329///
2330/// \param begin - a type* denoting the first element of the array
2331/// \param end - a type* denoting one past the end of the array
2332/// \param elementType - the element type of the array
2333/// \param destroyer - the function to call to destroy elements
2334/// \param useEHCleanup - whether to push an EH cleanup to destroy
2335/// the remaining elements in case the destruction of a single
2336/// element throws
2337void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
2338 llvm::Value *end,
2339 QualType elementType,
2340 CharUnits elementAlign,
2341 Destroyer *destroyer,
2342 bool checkZeroLength,
2343 bool useEHCleanup) {
2344 assert(!elementType->isArrayType());
2345
2346 // The basic structure here is a do-while loop, because we don't
2347 // need to check for the zero-element case.
2348 llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
2349 llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
2350
2351 if (checkZeroLength) {
2352 llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
2353 "arraydestroy.isempty");
2354 Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
2355 }
2356
2357 // Enter the loop body, making that address the current address.
2358 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2359 EmitBlock(bodyBB);
2360 llvm::PHINode *elementPast =
2361 Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
2362 elementPast->addIncoming(end, entryBB);
2363
2364 // Shift the address back by one element.
2365 llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
2366 llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
2367 llvm::Value *element = Builder.CreateInBoundsGEP(
2368 llvmElementType, elementPast, negativeOne, "arraydestroy.element");
2369
2370 if (useEHCleanup)
2371 pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
2372 destroyer);
2373
2374 // Perform the actual destruction there.
2375 destroyer(*this, Address(element, llvmElementType, elementAlign),
2376 elementType);
2377
2378 if (useEHCleanup)
2379 PopCleanupBlock();
2380
2381 // Check whether we've reached the end.
2382 llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
2383 Builder.CreateCondBr(done, doneBB, bodyBB);
2384 elementPast->addIncoming(element, Builder.GetInsertBlock());
2385
2386 // Done.
2387 EmitBlock(doneBB);
2388}
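// Illustrative sketch (not from CGDecl.cpp itself): the blocks built above
// correspond roughly to this source-level loop, destroying elements from last
// to first:
//
//   void destroyArray(T *begin, T *end) {
//     if (begin == end) return;       // "arraydestroy.isempty" (only if needed)
//     T *elementPast = end;
//     do {                            // "arraydestroy.body"
//       T *element = elementPast - 1;
//       destroy(*element);            // destroyer(...), optionally guarded by
//                                     // a partial-array EH cleanup
//       elementPast = element;
//     } while (elementPast != begin); // then fall through to "arraydestroy.done"
//   }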
2389
2390/// Perform partial array destruction as if in an EH cleanup. Unlike
2391/// emitArrayDestroy, the element type here may still be an array type.
2392static void emitPartialArrayDestroy(CodeGenFunction &CGF,
2393 llvm::Value *begin, llvm::Value *end,
2394 QualType type, CharUnits elementAlign,
2395 CodeGenFunction::Destroyer *destroyer) {
2396 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
2397
2398 // If the element type is itself an array, drill down.
2399 unsigned arrayDepth = 0;
2400 while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
2401 // VLAs don't require a GEP index to walk into.
2402 if (!isa<VariableArrayType>(arrayType))
2403 arrayDepth++;
2404 type = arrayType->getElementType();
2405 }
2406
2407 if (arrayDepth) {
2408 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
2409
2410 SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
2411 begin = CGF.Builder.CreateInBoundsGEP(
2412 elemTy, begin, gepIndices, "pad.arraybegin");
2413 end = CGF.Builder.CreateInBoundsGEP(
2414 elemTy, end, gepIndices, "pad.arrayend");
2415 }
2416
2417 // Destroy the array. We don't ever need an EH cleanup because we
2418 // assume that we're in an EH cleanup ourselves, so a throwing
2419 // destructor causes an immediate terminate.
2420 CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2421 /*checkZeroLength*/ true, /*useEHCleanup*/ false);
2422}
2423
2424namespace {
2425 /// RegularPartialArrayDestroy - a cleanup which performs a partial
2426 /// array destroy where the end pointer is regularly determined and
2427 /// does not need to be loaded from a local.
2428 class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2429 llvm::Value *ArrayBegin;
2430 llvm::Value *ArrayEnd;
2431 QualType ElementType;
2432 CodeGenFunction::Destroyer *Destroyer;
2433 CharUnits ElementAlign;
2434 public:
2435 RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
2436 QualType elementType, CharUnits elementAlign,
2437 CodeGenFunction::Destroyer *destroyer)
2438 : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
2439 ElementType(elementType), Destroyer(destroyer),
2440 ElementAlign(elementAlign) {}
2441
2442 void Emit(CodeGenFunction &CGF, Flags flags) override {
2443 emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
2444 ElementType, ElementAlign, Destroyer);
2445 }
2446 };
2447
2448 /// IrregularPartialArrayDestroy - a cleanup which performs a
2449 /// partial array destroy where the end pointer is irregularly
2450 /// determined and must be loaded from a local.
2451 class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2452 llvm::Value *ArrayBegin;
2453 Address ArrayEndPointer;
2454 QualType ElementType;
2455 CodeGenFunction::Destroyer *Destroyer;
2456 CharUnits ElementAlign;
2457 public:
2458 IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
2459 Address arrayEndPointer,
2460 QualType elementType,
2461 CharUnits elementAlign,
2462 CodeGenFunction::Destroyer *destroyer)
2463 : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
2464 ElementType(elementType), Destroyer(destroyer),
2465 ElementAlign(elementAlign) {}
2466
2467 void Emit(CodeGenFunction &CGF, Flags flags) override {
2468 llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
2469 emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
2470 ElementType, ElementAlign, Destroyer);
2471 }
2472 };
2473} // end anonymous namespace
2474
2475/// pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to
2476/// destroy already-constructed elements of the given array. The cleanup may be
2477/// popped with DeactivateCleanupBlock or PopCleanupBlock.
2478///
2479/// \param elementType - the immediate element type of the array;
2480/// possibly still an array type
2481void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2482 Address arrayEndPointer,
2483 QualType elementType,
2484 CharUnits elementAlign,
2485 Destroyer *destroyer) {
2486 pushFullExprCleanup<IrregularPartialArrayDestroy>(
2487 NormalAndEHCleanup, arrayBegin, arrayEndPointer, elementType,
2488 elementAlign, destroyer);
2489}
2490
2491/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
2492/// already-constructed elements of the given array. The cleanup
2493/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2494///
2495/// \param elementType - the immediate element type of the array;
2496/// possibly still an array type
2497void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2498 llvm::Value *arrayEnd,
2499 QualType elementType,
2500 CharUnits elementAlign,
2501 Destroyer *destroyer) {
2502 pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
2503 arrayBegin, arrayEnd,
2504 elementType, elementAlign,
2505 destroyer);
2506}
2507
2508/// Lazily declare the @llvm.lifetime.start intrinsic.
2509llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
2510 if (LifetimeStartFn)
2511 return LifetimeStartFn;
2512 LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
2513 llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
2514 return LifetimeStartFn;
2515}
2516
2517/// Lazily declare the @llvm.lifetime.end intrinsic.
2518llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
2519 if (LifetimeEndFn)
2520 return LifetimeEndFn;
2521 LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
2522 llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
2523 return LifetimeEndFn;
2524}
2525
2526namespace {
2527 /// A cleanup to perform a release of an object at the end of a
2528 /// function. This is used to balance out the incoming +1 of a
2529 /// ns_consumed argument when we can't reasonably do that just by
2530 /// not doing the initial retain for a __block argument.
2531 struct ConsumeARCParameter final : EHScopeStack::Cleanup {
2532 ConsumeARCParameter(llvm::Value *param,
2533 ARCPreciseLifetime_t precise)
2534 : Param(param), Precise(precise) {}
2535
2536 llvm::Value *Param;
2537 ARCPreciseLifetime_t Precise;
2538
2539 void Emit(CodeGenFunction &CGF, Flags flags) override {
2540 CGF.EmitARCRelease(Param, Precise);
2541 }
2542 };
2543} // end anonymous namespace
2544
2545/// Emit an alloca (or GlobalValue depending on target)
2546/// for the specified parameter and set up LocalDeclMap.
2547void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
2548 unsigned ArgNo) {
2549 bool NoDebugInfo = false;
2550 // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
2551 assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
2552 "Invalid argument to EmitParmDecl");
2553
2554 // Set the name of the parameter's initial value to make IR easier to
2555 // read. Don't modify the names of globals.
2556 if (!isa<llvm::GlobalValue>(Arg.getAnyValue()))
2557 Arg.getAnyValue()->setName(D.getName());
2558
2559 QualType Ty = D.getType();
2560
2561 // Use better IR generation for certain implicit parameters.
2562 if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
2563 // The only implicit argument a block has is its literal.
2564 // This may be passed as an inalloca'ed value on Windows x86.
2565 if (BlockInfo) {
2566 llvm::Value *V = Arg.isIndirect()
2567 ? Builder.CreateLoad(Arg.getIndirectAddress())
2568 : Arg.getDirectValue();
2569 setBlockContextParameter(IPD, ArgNo, V);
2570 return;
2571 }
2572 // Suppressing debug info for ThreadPrivateVar parameters, else it hides
2573 // debug info of TLS variables.
2574 NoDebugInfo =
2575 (IPD->getParameterKind() == ImplicitParamKind::ThreadPrivateVar);
2576 }
2577
2578 Address DeclPtr = Address::invalid();
2579 RawAddress AllocaPtr = Address::invalid();
2580 bool DoStore = false;
2581 bool IsScalar = hasScalarEvaluationKind(Ty);
2582 bool UseIndirectDebugAddress = false;
2583
2584 // If we already have a pointer to the argument, reuse the input pointer.
2585 if (Arg.isIndirect()) {
2586 DeclPtr = Arg.getIndirectAddress();
2587 DeclPtr = DeclPtr.withElementType(ConvertTypeForMem(Ty));
2588 // Indirect argument is in alloca address space, which may be different
2589 // from the default address space.
2590 auto AllocaAS = CGM.getASTAllocaAddressSpace();
2591 auto *V = DeclPtr.emitRawPointer(*this);
2592 AllocaPtr = RawAddress(V, DeclPtr.getElementType(), DeclPtr.getAlignment());
2593
2594 // For truly ABI indirect arguments -- those that are not `byval` -- store
2595 // the address of the argument on the stack to preserve debug information.
2596 ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
2597 if (ArgInfo.isIndirect())
2598 UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
2599 if (UseIndirectDebugAddress) {
2600 auto PtrTy = getContext().getPointerType(Ty);
2601 AllocaPtr = CreateMemTemp(PtrTy, getContext().getTypeAlignInChars(PtrTy),
2602 D.getName() + ".indirect_addr");
2603 EmitStoreOfScalar(V, AllocaPtr, /* Volatile */ false, PtrTy);
2604 }
2605
2606 auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
2607 auto DestLangAS =
2608 getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
2609 if (SrcLangAS != DestLangAS) {
2610 assert(getContext().getTargetAddressSpace(SrcLangAS) ==
2611 CGM.getDataLayout().getAllocaAddrSpace());
2612 auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
2613 auto *T = llvm::PointerType::get(getLLVMContext(), DestAS);
2614 DeclPtr =
2615 DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast(
2616 *this, V, SrcLangAS, DestLangAS, T, true),
2617 DeclPtr.isKnownNonNull());
2618 }
2619
2620 // Push a destructor cleanup for this parameter if the ABI requires it.
2621 // Don't push a cleanup in a thunk for a method that will also emit a
2622 // cleanup.
2623 if (Ty->isRecordType() && !CurFuncIsThunk &&
2624 Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
2625 if (QualType::DestructionKind DtorKind =
2626 D.needsDestruction(getContext())) {
2627 assert((DtorKind == QualType::DK_cxx_destructor ||
2628 DtorKind == QualType::DK_nontrivial_c_struct) &&
2629 "unexpected destructor type");
2630 pushDestroy(DtorKind, DeclPtr, Ty);
2631 CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
2632 EHStack.stable_begin();
2633 }
2634 }
2635 } else {
2636 // Check if the parameter address is controlled by OpenMP runtime.
2637 Address OpenMPLocalAddr =
2638 getLangOpts().OpenMP
2639 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
2640 : Address::invalid();
2641 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
2642 DeclPtr = OpenMPLocalAddr;
2643 AllocaPtr = DeclPtr;
2644 } else {
2645 // Otherwise, create a temporary to hold the value.
2646 DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
2647 D.getName() + ".addr", &AllocaPtr);
2648 }
2649 DoStore = true;
2650 }
2651
2652 llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
2653
2654 LValue lv = MakeAddrLValue(DeclPtr, Ty);
2655 if (IsScalar) {
2656 Qualifiers qs = Ty.getQualifiers();
2657 if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
2658 // We honor __attribute__((ns_consumed)) for types with lifetime.
2659 // For __strong, it's handled by just skipping the initial retain;
2660 // otherwise we have to balance out the initial +1 with an extra
2661 // cleanup to do the release at the end of the function.
2662 bool isConsumed = D.hasAttr<NSConsumedAttr>();
2663
2664 // If a parameter is pseudo-strong then we can omit the implicit retain.
2665 if (D.isARCPseudoStrong()) {
2666 assert(lt == Qualifiers::OCL_Strong &&
2667 "pseudo-strong variable isn't strong?");
2668 assert(qs.hasConst() && "pseudo-strong variable should be const!");
2669 lt = Qualifiers::OCL_ExplicitNone;
2670 }
2671
2672 // Load objects passed indirectly.
2673 if (Arg.isIndirect() && !ArgVal)
2674 ArgVal = Builder.CreateLoad(DeclPtr);
2675
2676 if (lt == Qualifiers::OCL_Strong) {
2677 if (!isConsumed) {
2678 if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2679 // use objc_storeStrong(&dest, value) for retaining the
2680 // object. But first, store a null into 'dest' because
2681 // objc_storeStrong attempts to release its old value.
2682 llvm::Value *Null = CGM.EmitNullConstant(D.getType());
2683 EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
2684 EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
2685 DoStore = false;
2686 }
2687 else
2688 // Don't use objc_retainBlock for block pointers, because we
2689 // don't want to Block_copy something just because we got it
2690 // as a parameter.
2691 ArgVal = EmitARCRetainNonBlock(ArgVal);
2692 }
2693 } else {
2694 // Push the cleanup for a consumed parameter.
2695 if (isConsumed) {
2696 ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
2697 ? ARCPreciseLifetime : ARCImpreciseLifetime);
2698 EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
2699 precise);
2700 }
2701
2702 if (lt == Qualifiers::OCL_Weak) {
2703 EmitARCInitWeak(DeclPtr, ArgVal);
2704 DoStore = false; // The weak init is a store, no need to do two.
2705 }
2706 }
2707
2708 // Enter the cleanup scope.
2709 EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
2710 }
2711 }
2712
2713 // Store the initial value into the alloca.
2714 if (DoStore)
2715 EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
2716
2717 setAddrOfLocalVar(&D, DeclPtr);
2718
2719 // Emit debug info for param declarations in non-thunk functions.
2720 if (CGDebugInfo *DI = getDebugInfo()) {
2721 if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
2722 !NoDebugInfo) {
2723 llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
2724 &D, AllocaPtr.getPointer(), ArgNo, Builder, UseIndirectDebugAddress);
2725 if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
2726 DI->getParamDbgMappings().insert({Var, DILocalVar});
2727 }
2728 }
2729
2730 if (D.hasAttr<AnnotateAttr>())
2731 EmitVarAnnotations(&D, DeclPtr.emitRawPointer(*this));
2732
2733 // We can only check return value nullability if all arguments to the
2734 // function satisfy their nullability preconditions. This makes it necessary
2735 // to emit null checks for args in the function body itself.
2736 if (requiresReturnValueNullabilityCheck()) {
2737 auto Nullability = Ty->getNullability();
2738 if (Nullability && *Nullability == NullabilityKind::NonNull) {
2739 SanitizerScope SanScope(this);
2740 RetValNullabilityPrecondition =
2741 Builder.CreateAnd(RetValNullabilityPrecondition,
2742 Builder.CreateIsNotNull(Arg.getAnyValue()));
2743 }
2744 }
2745}
2746
2747void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
2748 CodeGenFunction *CGF) {
2749 if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
2750 return;
2751 getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
2752}
2753
2754void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
2755 CodeGenFunction *CGF) {
2756 if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
2757 (!LangOpts.EmitAllDecls && !D->isUsed()))
2758 return;
2759 getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
2760}
2761
2762void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
2763 getOpenMPRuntime().processRequiresDirective(D);
2764}
2765
2766void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
2767 for (const Expr *E : D->varlist()) {
2768 const auto *DE = cast<DeclRefExpr>(E);
2769 const auto *VD = cast<VarDecl>(DE->getDecl());
2770
2771 // Skip all but globals.
2772 if (!VD->hasGlobalStorage())
2773 continue;
2774
2775 // Check if the global has been materialized yet or not. If not, we are done
2776 // as any later generation will utilize the OMPAllocateDeclAttr. However, if
2777 // we already emitted the global we might have done so before the
2778 // OMPAllocateDeclAttr was attached, leading to the wrong address space
2779 // (potentially). While not pretty, common practice is to remove the old IR
2780 // global and generate a new one, so we do that here too. Uses are replaced
2781 // properly.
2782 StringRef MangledName = getMangledName(VD);
2783 llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
2784 if (!Entry)
2785 continue;
2786
2787 // We can also keep the existing global if the address space is what we
2788 // expect it to be, if not, it is replaced.
2789 QualType ASTTy = VD->getType();
2790 LangAS GVAS = GetGlobalVarAddressSpace(VD);
2791 auto TargetAS = getContext().getTargetAddressSpace(GVAS);
2792 if (Entry->getType()->getAddressSpace() == TargetAS)
2793 continue;
2794
2795 // Make a new global with the correct type / address space.
2796 llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy);
2797 llvm::PointerType *PTy = llvm::PointerType::get(Ty, TargetAS);
2798
2799 // Replace all uses of the old global with a cast. Since we mutate the type
2800 // in place we need an intermediate that takes the spot of the old entry
2801 // until we can create the cast.
2802 llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
2803 getModule(), Entry->getValueType(), false,
2804 llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
2805 llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
2806 Entry->replaceAllUsesWith(DummyGV);
2807
2808 Entry->mutateType(PTy);
2809 llvm::Constant *NewPtrForOldDecl =
2810 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2811 Entry, DummyGV->getType());
2812
2813 // Now we have a casted version of the changed global, the dummy can be
2814 // replaced and deleted.
2815 DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
2816 DummyGV->eraseFromParent();
2817 }
2818}
2819
2820std::optional<CharUnits>
2821CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
2822 if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
2823 if (Expr *Alignment = AA->getAlignment()) {
2824 unsigned UserAlign =
2825 Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
2826 CharUnits NaturalAlign =
2827 getNaturalTypeAlignment(VD->getType().getNonReferenceType());
2828
2829 // OpenMP5.1 pg 185 lines 7-10
2830 // Each item in the align modifier list must be aligned to the maximum
2831 // of the specified alignment and the type's natural alignment.
2832 return CharUnits::fromQuantity(
2833 std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
2834 }
2835 }
2836 return std::nullopt;
2837}
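// Illustrative sketch (hypothetical OpenMP usage): given
//
//   int x;
//   #pragma omp allocate(x) align(16)
//
// getOMPAllocateAlignment returns max(16, natural alignment of 'x') per the
// OpenMP 5.1 rule quoted above; with no align modifier it returns std::nullopt
// and the caller falls back to the natural alignment.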
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3341
static void emitStoresForInitAfterBZero(CodeGenModule &CGM, llvm::Constant *Init, Address Loc, bool isVolatile, CGBuilderTy &Builder, bool IsAutoInit)
For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit the scalar stores that woul...
Definition: CGDecl.cpp:924
static bool isCapturedBy(const VarDecl &, const Expr *)
Determines whether the given __block variable is potentially captured by the given expression.
Definition: CGDecl.cpp:1685
static void emitPartialArrayDestroy(CodeGenFunction &CGF, llvm::Value *begin, llvm::Value *end, QualType type, CharUnits elementAlign, CodeGenFunction::Destroyer *destroyer)
Perform partial array destruction as if in an EH cleanup.
Definition: CGDecl.cpp:2392
static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D, Address Loc, bool isVolatile, CGBuilderTy &Builder)
Definition: CGDecl.cpp:1275
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init, unsigned &NumStores)
Decide whether we can emit the non-zero parts of the specified initializer with equal or fewer than N...
Definition: CGDecl.cpp:886
static llvm::Constant * patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern, llvm::Type *Ty)
Generate a constant filled with either a pattern or zeroes.
Definition: CGDecl.cpp:1018
static llvm::Constant * constWithPadding(CodeGenModule &CGM, IsPattern isPattern, llvm::Constant *constant)
Replace all padding bytes in a given constant with either a pattern byte or 0x00.
Definition: CGDecl.cpp:1070
static llvm::Value * shouldUseMemSetToInitialize(llvm::Constant *Init, uint64_t GlobalSize, const llvm::DataLayout &DL)
Decide whether we should use memset to initialize a local variable instead of using a memcpy from a c...
Definition: CGDecl.cpp:992
IsPattern
Definition: CGDecl.cpp:1015
static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D)
Definition: CGDecl.cpp:223
static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D, Address Loc, bool isVolatile, CGBuilderTy &Builder, llvm::Constant *constant, bool IsAutoInit)
Definition: CGDecl.cpp:1165
static bool shouldSplitConstantStore(CodeGenModule &CGM, uint64_t GlobalByteSize)
Decide whether we want to split a constant structure or array store into a sequence of its fields' st...
Definition: CGDecl.cpp:1004
static llvm::Constant * replaceUndef(CodeGenModule &CGM, IsPattern isPattern, llvm::Constant *constant)
Definition: CGDecl.cpp:1297
static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF, const LValue &destLV, const Expr *init)
Definition: CGDecl.cpp:692
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init, uint64_t GlobalSize)
Decide whether we should use bzero plus some stores to initialize a local variable instead of using a...
Definition: CGDecl.cpp:971
static llvm::Constant * constStructWithPadding(CodeGenModule &CGM, IsPattern isPattern, llvm::StructType *STy, llvm::Constant *constant)
Helper function for constWithPadding() to deal with padding in structures.
Definition: CGDecl.cpp:1030
static bool containsUndef(llvm::Constant *constant)
Definition: CGDecl.cpp:1286
static bool isAccessedBy(const VarDecl &var, const Stmt *s)
Definition: CGDecl.cpp:660
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var, Address addr, Qualifiers::ObjCLifetime lifetime)
EmitAutoVarWithLifetime - Does the setup required for an automatic variable with lifetime.
Definition: CGDecl.cpp:624
static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM, const VarDecl &D, CGBuilderTy &Builder, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1156
static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D, Address Loc, bool isVolatile, CGBuilderTy &Builder)
Definition: CGDecl.cpp:1265
static void drillIntoBlockVariable(CodeGenFunction &CGF, LValue &lvalue, const VarDecl *var)
Definition: CGDecl.cpp:741
CodeGenFunction::ComplexPairTy ComplexPairTy
const Decl * D
Expr * E
This file defines OpenMP nodes for declarative directives.
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
static const NamedDecl * getDefinition(const Decl *D)
Definition: SemaDecl.cpp:2885
SourceLocation Loc
Definition: SemaObjC.cpp:759
Defines the SourceManager interface.
SourceLocation Begin
__device__ __2f16 float __ockl_bool s
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
IdentifierTable & Idents
Definition: ASTContext.h:660
const LangOptions & getLangOpts() const
Definition: ASTContext.h:797
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2828
unsigned getTargetAddressSpace(LangAS AS) const
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3571
Represents a block literal declaration, which is like an unnamed FunctionDecl.
Definition: Decl.h:4471
ArrayRef< Capture > captures() const
Definition: Decl.h:4598
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:6365
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1546
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2539
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1375
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2803
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:125
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
CharUnits getAlignment() const
Definition: Address.h:189
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition: Address.h:259
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:274
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
Definition: Address.h:231
bool isValid() const
Definition: Address.h:177
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition: CGValue.h:602
static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, SourceLocation TemporaryLocation)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:902
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:135
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:396
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:107
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
Definition: CGBuilder.h:157
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:363
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:344
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:137
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl)
Emit information about a global variable.
Param2DILocTy & getParamDbgMappings()
Definition: CGDebugInfo.h:619
llvm::DILocalVariable * EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI, unsigned ArgNo, CGBuilderTy &Builder, bool UsePointerValue=false)
Emit call to llvm.dbg.declare for an argument variable declaration.
llvm::DILocalVariable * EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI, CGBuilderTy &Builder, const bool UsePointerValue=false)
Emit call to llvm.dbg.declare for an automatic variable declaration.
void setLocation(SourceLocation Loc)
Update the current source location.
void registerVLASizeExpression(QualType Ty, llvm::Metadata *SizeExpr)
Register VLA size expression debug node with the qualified type.
Definition: CGDebugInfo.h:424
CGFunctionInfo - Class to encapsulate the information about a function definition.
const_arg_iterator arg_begin() const
MutableArrayRef< ArgInfo > arguments()
virtual void EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF, const VarDecl &D)
Emit the IR required for a work-group-local variable declaration, and add an entry to CGF's LocalDecl...
Allows to disable automatic handling of functions used in target regions as those marked as omp decla...
virtual void getKmpcFreeShared(CodeGenFunction &CGF, const std::pair< llvm::Value *, llvm::Value * > &AddrSizePair)
Get call to __kmpc_free_shared.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF=nullptr)
Emit the function for the user defined mapper construct.
virtual void processRequiresDirective(const OMPRequiresDecl *D)
Perform check on requires decl to ensure that target architecture supports unified addressing.
virtual std::pair< llvm::Value *, llvm::Value * > getKmpcAllocShared(CodeGenFunction &CGF, const VarDecl *VD)
Get call to __kmpc_alloc_shared.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D)
Emit code for the specified user defined reduction construct.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
void add(RValue rvalue, QualType type)
Definition: CGCall.h:298
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, QualType::DestructionKind dtorKind)
void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags, bool LoadBlockVarAddr, bool CanThrow)
Enter a cleanup to destroy a __block variable.
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
static Destroyer destroyNonTrivialCStruct
static bool cxxDestructorCanThrow(QualType T)
Check if T is a C++ class that has a destructor that can throw.
SanitizerSet SanOpts
Sanitizers enabled for this function.
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
void EmitARCMoveWeak(Address dst, Address src)
void EmitAutoVarDecl(const VarDecl &D)
EmitAutoVarDecl - Emit an auto variable declaration.
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
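A simplified sketch of how the lifetime helpers bracket a temporary, assuming `Alloca` is the raw llvm::AllocaInst* and `AllocaTy` its allocated type (placeholder names, not from this file):
  llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(AllocaTy);
  // Returns the emitted size value, or null when lifetime markers are disabled.
  llvm::Value *SizeV = EmitLifetimeStart(Size, Alloca);
  // ... uses of the temporary ...
  if (SizeV)
    EmitLifetimeEnd(SizeV, Alloca);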
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
static bool hasScalarEvaluationKind(QualType T)
const BlockByrefInfo & getBlockByrefInfo(const VarDecl *var)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignTempAlloca - This creates an alloca with the default ABI alignment of the given L...
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
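For illustration only, a query against an already-captured VLA type might look like the following, where `VAT` is a placeholder VariableArrayType pointer:
  // EmitVariablyModifiedType must already have captured the size expressions.
  auto VlaSize = getVLASize(VAT);
  llvm::Value *NumElts = VlaSize.NumElts; // runtime element count
  QualType EltTy = VlaSize.Type;          // the non-variably-sized element type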
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitExtendGCLifetime(llvm::Value *object)
EmitExtendGCLifetime - Given a pointer to an Objective-C object, make sure it survives garbage collec...
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
void EmitVariablyModifiedType(QualType Ty)
EmitVariablyModifiedType - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
void emitArrayDestroy(llvm::Value *begin, llvm::Value *end, QualType elementType, CharUnits elementAlign, Destroyer *destroyer, bool checkZeroLength, bool useEHCleanup)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
const CodeGen::CGBlockInfo * BlockInfo
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
EmitExprAsInit - Emits the code necessary to initialize a location in memory with the given initializ...
void emitByrefStructureInit(const AutoVarEmission &emission)
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
@ TCK_NonnullAssign
Checking the value assigned to a _Nonnull pointer. Must not be null.
llvm::Value * EmitARCStoreStrongCall(Address addr, llvm::Value *value, bool resultIgnored)
llvm::Type * ConvertTypeForMem(QualType T)
llvm::Value * EmitARCUnsafeUnretainedScalarExpr(const Expr *expr)
void EmitAutoVarInit(const AutoVarEmission &emission)
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
DominatingValue< T >::saved_type saveValueInCond(T value)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
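A minimal sketch of pairing CreateMemTemp with EmitStoreOfScalar, assuming we are inside a CodeGenFunction member and `Ty` is a complete scalar QualType (illustrative names only):
  RawAddress Tmp = CreateMemTemp(Ty, "example.tmp");
  llvm::Value *Zero = llvm::Constant::getNullValue(ConvertTypeForMem(Ty));
  // Store through the typed address; alignment comes from the temporary itself.
  EmitStoreOfScalar(Zero, Tmp, /*Volatile=*/false, Ty);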
void EmitStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
void EmitAtomicInit(Expr *E, LValue lvalue)
const TargetInfo & getTarget() const
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
void emitDestroy(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit)
Emit code in this function to perform a guarded variable initialization.
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
void initFullExprCleanupWithFlag(RawAddress ActiveFlag)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
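As a loose sketch of the pattern (modeled on the nonnull-assignment check; `Ptr`, `PtrTy`, and `Loc` are placeholder names), the check helpers combine like this:
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(Ptr);
  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
                                  EmitCheckTypeDescriptor(PtrTy)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, Ptr);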
void EmitARCCopyWeak(Address dst, Address src)
void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum, llvm::Value *ptr)
void defaultInitNonTrivialCStructVar(LValue Dst)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
bool isTrivialInitializer(const Expr *Init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
LValue EmitDeclRefLValue(const DeclRefExpr *E)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
llvm::Value * EmitARCRetainAutoreleaseScalarExpr(const Expr *expr)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
void EmitVarDecl(const VarDecl &D)
EmitVarDecl - Emit a local variable declaration.
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
void EmitAutoVarCleanups(const AutoVarEmission &emission)
llvm::GlobalVariable * AddInitializerToStaticVarDecl(const VarDecl &D, llvm::GlobalVariable *GV)
AddInitializerToStaticVarDecl - Add the initializer for 'D' to the global variable that has already b...
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
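A short sketch of the usual destructor-cleanup pattern built from these helpers, assuming `addr` and `type` describe a just-initialized local (illustrative only):
  if (QualType::DestructionKind DtorKind = type.isDestructedType()) {
    // getCleanupKind(DtorKind) decides between normal-only and normal+EH cleanups.
    pushDestroy(DtorKind, addr, type);
  }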
llvm::Type * ConvertType(QualType T)
void EmitARCInitWeak(Address addr, llvm::Value *value)
static Destroyer destroyARCStrongPrecise
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
void pushStackRestore(CleanupKind kind, Address SPMem)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const CGFunctionInfo * CurFnInfo
void pushKmpcAllocFree(CleanupKind Kind, std::pair< llvm::Value *, llvm::Value * > AddrSizePair)
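Illustrative sketch of pairing the device shared-memory allocation with its matching free via a cleanup, assuming this runs inside a CodeGenFunction member (variable names are placeholders; the runtime entry points are those listed above):
  // getKmpcAllocShared returns {address, size}; the cleanup frees it on scope exit.
  std::pair<llvm::Value *, llvm::Value *> AddrSize =
      CGM.getOpenMPRuntime().getKmpcAllocShared(*this, VD);
  pushKmpcAllocFree(NormalCleanup, AddrSize);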
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
static Destroyer destroyARCStrongImprecise
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo)
Emits the alloca and debug information for the size expressions for each dimension of an array.
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
bool hasLabelBeenSeenInCurrentScope() const
Return true if a label was seen in the current scope.
This class organizes the cross-function state that is used while generating LLVM code.
StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD)
void setGVProperties(llvm::GlobalValue *GV, GlobalDecl GD) const
Set visibility, dllimport/dllexport and dso_local.
llvm::Module & getModule() const
void setStaticLocalDeclAddress(const VarDecl *D, llvm::Constant *C)
llvm::Function * getLLVMLifetimeStartFn()
Lazily declare the @llvm.lifetime.start intrinsic.
Definition: CGDecl.cpp:2509
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
Address createUnnamedGlobalFrom(const VarDecl &D, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1106
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGOpenCLRuntime & getOpenCLRuntime()
Return a reference to the configured OpenCL runtime.
void addUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.used metadata.
void EmitOMPAllocateDecl(const OMPAllocateDecl *D)
Emit a code for the allocate directive.
Definition: CGDecl.cpp:2766
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
const llvm::DataLayout & getDataLayout() const
void addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.compiler.used metadata.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
llvm::Constant * getOrCreateStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
Definition: CGDecl.cpp:246
llvm::Constant * GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition=NotForDefinition)
void AddGlobalAnnotations(const ValueDecl *D, llvm::GlobalValue *GV)
Add global annotations that are set on D, for the global GV.
void setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const
Set the TLS mode for the given LLVM GlobalValue for the thread-local variable declaration D.
ASTContext & getContext() const
void EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF=nullptr)
Emit a code for declare mapper construct.
Definition: CGDecl.cpp:2754
llvm::Function * getLLVMLifetimeEndFn()
Lazily declare the @llvm.lifetime.end intrinsic.
Definition: CGDecl.cpp:2518
void EmitOMPRequiresDecl(const OMPRequiresDecl *D)
Emit a code for requires directive.
Definition: CGDecl.cpp:2762
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
std::optional< CharUnits > getOMPAllocateAlignment(const VarDecl *VD)
Return the alignment specified in an allocate directive, if present.
Definition: CGDecl.cpp:2821
llvm::LLVMContext & getLLVMContext()
llvm::GlobalValue * GetGlobalValue(StringRef Ref)
void EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D, CodeGenFunction *CGF=nullptr)
Emit a code for declare reduction construct.
Definition: CGDecl.cpp:2747
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of a constant literal, which is used to emit the constant literal as globa...
LangAS GetGlobalVarAddressSpace(const VarDecl *D)
Return the AST address space of the underlying global variable for D, as determined by its declaratio...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * convertTypeForLoadStore(QualType T, llvm::Type *LLVMTy=nullptr)
Given that T is a scalar type, return the IR type that should be used for load and store operations.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
bool typeRequiresSplitIntoByteArray(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
Check whether the given type needs to be laid out in memory using an opaque byte-array type because i...
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type.
Definition: CGCall.cpp:462
llvm::Constant * tryEmitAbstractForInitializer(const VarDecl &D)
Try to emit the initializer of the given declaration as an abstract constant.
A cleanup scope which generates the cleanup blocks lazily.
Definition: CGCleanup.h:243
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:141
ConditionalCleanup stores the saved form of its parameters, then restores them and performs the clean...
Definition: EHScopeStack.h:203
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
iterator begin() const
Returns an iterator pointing to the innermost EH scope.
Definition: CGCleanup.h:615
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition: CGValue.h:361
QualType getType() const
Definition: CGValue.h:291
void setNonGC(bool Value)
Definition: CGValue.h:304
void setAddress(Address address)
Definition: CGValue.h:363
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:293
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
An abstract representation of an aligned address.
Definition: Address.h:42
llvm::Value * getPointer() const
Definition: Address.h:66
static RawAddress invalid()
Definition: Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:372
void reportGlobal(llvm::GlobalVariable *GV, const VarDecl &D, bool IsDynInit=false)
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
Definition: TargetInfo.h:76
bool IsBypassed(const VarDecl *D) const
Returns true if the variable declaration was bypassed by any goto or switch statement.
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1611
body_range body()
Definition: Stmt.h:1674
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Definition: DeclBase.h:1436
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
const DeclContext * getParentFunctionOrMethod(bool LexicalParent=false) const
If this decl is defined inside a function/method/block it returns the corresponding DeclContext,...
Definition: DeclBase.cpp:317
T * getAttr() const
Definition: DeclBase.h:580
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
Definition: DeclBase.cpp:1242
SourceLocation getLocation() const
Definition: DeclBase.h:446
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition: DeclBase.cpp:552
DeclContext * getDeclContext()
Definition: DeclBase.h:455
bool hasAttr() const
Definition: DeclBase.h:584
Kind getKind() const
Definition: DeclBase.h:449
This represents one expression.
Definition: Expr.h:110
bool isXValue() const
Definition: Expr.h:279
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition: Expr.cpp:3075
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3066
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition: Expr.h:277
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
QualType getType() const
Definition: Expr.h:142
Represents a function declaration or definition.
Definition: Decl.h:1932
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
One of these records is kept for each identifier that is lexed.
IdentifierInfo & getOwn(StringRef Name)
Gets an IdentifierInfo for the given name without consulting external sources.
This represents '#pragma omp allocate ...' directive.
Definition: DeclOpenMP.h:474
This represents '#pragma omp declare mapper ...' directive.
Definition: DeclOpenMP.h:287
This represents '#pragma omp declare reduction ...' directive.
Definition: DeclOpenMP.h:177
This represents '#pragma omp requires...' directive.
Definition: DeclOpenMP.h:417
A (possibly-)qualified type.
Definition: Type.h:941
@ DK_cxx_destructor
Definition: Type.h:1532
@ DK_nontrivial_c_struct
Definition: Type.h:1535
@ DK_objc_weak_lifetime
Definition: Type.h:1534
@ DK_objc_strong_lifetime
Definition: Type.h:1533
@ PDIK_Struct
The type is a struct containing a field whose type is not PCK_Trivial.
Definition: Type.h:1478
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:7881
bool isConstant(const ASTContext &Ctx) const
Definition: Type.h:1101
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:7795
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: Type.h:1444
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition: Type.h:7956
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: Type.h:7849
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition: Type.h:1040
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition: Type.cpp:2596
The collection of all-type qualifiers we support.
Definition: Type.h:319
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:348
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: Type.h:341
@ OCL_None
There is no lifetime qualification on this type.
Definition: Type.h:337
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: Type.h:351
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: Type.h:354
bool hasConst() const
Definition: Type.h:444
ObjCLifetime getObjCLifetime() const
Definition: Type.h:532
bool isParamDestroyedInCallee() const
Definition: Decl.h:4287
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:5970
RecordDecl * getDecl() const
Definition: Type.h:5980
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
static const uint64_t MaximumAlignment
Definition: Sema.h:891
Encodes a location in the source.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4417
Stmt - This represents one statement.
Definition: Stmt.h:84
child_range children()
Definition: Stmt.cpp:287
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
Definition: TargetCXXABI.h:136
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1327
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
Definition: Type.cpp:2352
bool isArrayType() const
Definition: Type.h:8080
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8612
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2718
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8545
bool isRecordType() const
Definition: Type.h:8108
std::optional< NullabilityKind > getNullability() const
Determine the nullability of the given type.
Definition: Type.cpp:4693
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:667
QualType getType() const
Definition: Decl.h:678
Represents a variable declaration or definition.
Definition: Decl.h:879
static VarDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, StorageClass S)
Definition: Decl.cpp:2133
bool hasGlobalStorage() const
Returns true for all variables that do not have local storage.
Definition: Decl.h:1174
const Expr * getInit() const
Definition: Decl.h:1316
bool isLocalVarDecl() const
Returns true for local variable declarations other than parameters.
Definition: Decl.h:1201
Defines the clang::TargetInfo interface.
@ BLOCK_FIELD_IS_BYREF
Definition: CGBlocks.h:92
@ BLOCK_FIELD_IS_WEAK
Definition: CGBlocks.h:94
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
Definition: PatternInit.cpp:15
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:84
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
ARCPreciseLifetime_t
Does an ARC strong l-value have precise lifetime?
Definition: CGValue.h:135
@ ARCPreciseLifetime
Definition: CGValue.h:136
@ ARCImpreciseLifetime
Definition: CGValue.h:136
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicAllOfMatcher< Decl > decl
Matches declarations.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
constexpr Variable var(Literal L)
Returns the variable of L.
Definition: CNFFormula.h:64
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
Definition: Format.cpp:3844
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2263
bool Null(InterpState &S, CodePtr OpPC, const Descriptor *Desc)
Definition: Interp.h:2279
The JSON file list parser is used to communicate input to InstallAPI.
@ Ctor_Base
Base object ctor.
Definition: ABI.h:26
@ OpenCL
Definition: LangStandard.h:66
@ CPlusPlus
Definition: LangStandard.h:56
@ NonNull
Values of this type can never be null.
@ SC_Auto
Definition: Specifiers.h:256
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
Definition: Linkage.h:24
@ SD_Automatic
Automatic storage duration (most local variables).
Definition: Specifiers.h:329
@ Dtor_Base
Base object dtor.
Definition: ABI.h:36
@ Dtor_Complete
Complete object dtor.
Definition: ABI.h:35
LangAS
Defines the address space values used by the address space qualifier of QualType.
Definition: AddressSpaces.h:25
@ VK_LValue
An l-value expression is a reference to an object with independent storage.
Definition: Specifiers.h:139
const FunctionProtoType * T
@ ThreadPrivateVar
Parameter for Thread private variable.
float __ovld __cnfn length(float)
Return the length of vector p, i.e., sqrt(p.x^2 + p.y^2 + ...)
static Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable VD.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::PointerType * AllocaInt8PtrTy
A metaprogramming class for ensuring that a value will dominate an arbitrary position in a function.
Definition: EHScopeStack.h:65
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:159