1//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides C++ code generation targeting the Itanium C++ ABI. The class
10// in this file generates structures that follow the Itanium C++ ABI, which is
11// documented at:
12// https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14//
15// It also supports the closely-related ARM ABI, documented at:
16// https://developer.arm.com/documentation/ihi0041/g/
17//
18//===----------------------------------------------------------------------===//
19
20#include "CGCXXABI.h"
21#include "CGCleanup.h"
22#include "CGRecordLayout.h"
23#include "CGVTables.h"
24#include "CodeGenFunction.h"
25#include "CodeGenModule.h"
26#include "TargetInfo.h"
27#include "clang/AST/Attr.h"
28#include "clang/AST/Mangle.h"
29#include "clang/AST/StmtCXX.h"
30#include "clang/AST/Type.h"
31#include "clang/CodeGen/ConstantInitBuilder.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/GlobalValue.h"
34#include "llvm/IR/Instructions.h"
35#include "llvm/IR/Intrinsics.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/ScopedPrinter.h"
38
39using namespace clang;
40using namespace CodeGen;
41
42namespace {
43class ItaniumCXXABI : public CodeGen::CGCXXABI {
44 /// VTables - All the vtables which have been defined.
45 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46
47 /// All the thread wrapper functions that have been used.
48 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49 ThreadWrappers;
50
51protected:
52 bool UseARMMethodPtrABI;
53 bool UseARMGuardVarABI;
54 bool Use32BitVTableOffsetABI;
55
56 ItaniumMangleContext &getMangleContext() {
57 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
58 }
59
60public:
61 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62 bool UseARMMethodPtrABI = false,
63 bool UseARMGuardVarABI = false) :
64 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65 UseARMGuardVarABI(UseARMGuardVarABI),
66 Use32BitVTableOffsetABI(false) { }
67
68 bool classifyReturnType(CGFunctionInfo &FI) const override;
69
70 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71 // If C++ prohibits us from making a copy, pass by address.
72 if (!RD->canPassInRegisters())
73 return RAA_Indirect;
74 return RAA_Default;
75 }
76
77 bool isThisCompleteObject(GlobalDecl GD) const override {
78 // The Itanium ABI has separate complete-object vs. base-object
79 // variants of both constructors and destructors.
80 if (isa<CXXDestructorDecl>(GD.getDecl())) {
81 switch (GD.getDtorType()) {
82 case Dtor_Complete:
83 case Dtor_Deleting:
84 return true;
85
86 case Dtor_Base:
87 return false;
88
89 case Dtor_Comdat:
90 llvm_unreachable("emitting dtor comdat as function?");
91 }
92 llvm_unreachable("bad dtor kind");
93 }
94 if (isa<CXXConstructorDecl>(GD.getDecl())) {
95 switch (GD.getCtorType()) {
96 case Ctor_Complete:
97 return true;
98
99 case Ctor_Base:
100 return false;
101
102 case Ctor_CopyingClosure:
103 case Ctor_DefaultClosure:
104 llvm_unreachable("closure ctors in Itanium ABI?");
105
106 case Ctor_Comdat:
107 llvm_unreachable("emitting ctor comdat as function?");
108 }
109 llvm_unreachable("bad ctor kind");
110 }
111
112 // No other kinds.
113 return false;
114 }
115
116 bool isZeroInitializable(const MemberPointerType *MPT) override;
117
118 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119
120 CGCallee
121 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122 const Expr *E,
123 Address This,
124 llvm::Value *&ThisPtrForCall,
125 llvm::Value *MemFnPtr,
126 const MemberPointerType *MPT) override;
127
128 llvm::Value *
129 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130 Address Base,
131 llvm::Value *MemPtr,
132 const MemberPointerType *MPT) override;
133
134 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135 const CastExpr *E,
136 llvm::Value *Src) override;
137 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138 llvm::Constant *Src) override;
139
140 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
141
142 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144 CharUnits offset) override;
145 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147 CharUnits ThisAdjustment);
148
149 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150 llvm::Value *L, llvm::Value *R,
151 const MemberPointerType *MPT,
152 bool Inequality) override;
153
154 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155 llvm::Value *Addr,
156 const MemberPointerType *MPT) override;
157
158 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159 Address Ptr, QualType ElementType,
160 const CXXDestructorDecl *Dtor) override;
161
162 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
164
165 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
166
167 llvm::CallInst *
168 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169 llvm::Value *Exn) override;
170
171 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173 CatchTypeInfo
174 getAddrOfCXXCatchHandlerType(QualType Ty,
175 QualType CatchHandlerType) override {
176 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
177 }
178
179 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182 Address ThisPtr,
183 llvm::Type *StdTypeInfoPtrTy) override;
184
185 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186 QualType SrcRecordTy) override;
187
188 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address ThisAddr,
189 QualType SrcRecordTy, QualType DestTy,
190 QualType DestRecordTy,
191 llvm::BasicBlock *CastEnd) override;
192
193 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address ThisAddr,
194 QualType SrcRecordTy,
195 QualType DestTy) override;
196
197 bool EmitBadCastCall(CodeGenFunction &CGF) override;
198
199 llvm::Value *
200 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201 const CXXRecordDecl *ClassDecl,
202 const CXXRecordDecl *BaseClassDecl) override;
203
204 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
205
206 AddedStructorArgCounts
207 buildStructorSignature(GlobalDecl GD,
208 SmallVectorImpl<CanQualType> &ArgTys) override;
209
210 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211 CXXDtorType DT) const override {
212 // Itanium does not emit any destructor variant as an inline thunk.
213 // Delegating may occur as an optimization, but all variants are either
214 // emitted with external linkage or as linkonce if they are inline and used.
215 return false;
216 }
217
218 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
219
220 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221 FunctionArgList &Params) override;
222
223 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
224
225 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226 const CXXConstructorDecl *D,
227 CXXCtorType Type,
228 bool ForVirtualBase,
229 bool Delegating) override;
230
231 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232 const CXXDestructorDecl *DD,
233 CXXDtorType Type,
234 bool ForVirtualBase,
235 bool Delegating) override;
236
237 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238 CXXDtorType Type, bool ForVirtualBase,
239 bool Delegating, Address This,
240 QualType ThisTy) override;
241
242 void emitVTableDefinitions(CodeGenVTables &CGVT,
243 const CXXRecordDecl *RD) override;
244
245 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246 CodeGenFunction::VPtr Vptr) override;
247
248 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249 return true;
250 }
251
252 llvm::Constant *
253 getVTableAddressPoint(BaseSubobject Base,
254 const CXXRecordDecl *VTableClass) override;
255
256 llvm::Value *getVTableAddressPointInStructor(
257 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
259
260 llvm::Value *getVTableAddressPointInStructorWithVTT(
261 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
263
264 llvm::Constant *
265 getVTableAddressPointForConstExpr(BaseSubobject Base,
266 const CXXRecordDecl *VTableClass) override;
267
268 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269 CharUnits VPtrOffset) override;
270
271 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272 Address This, llvm::Type *Ty,
273 SourceLocation Loc) override;
274
275 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276 const CXXDestructorDecl *Dtor,
277 CXXDtorType DtorType, Address This,
278 DeleteOrMemberCallExpr E) override;
279
280 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
281
282 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
284
285 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286 bool ReturnAdjustment) override {
287 // Allow inlining of thunks by emitting them with available_externally
288 // linkage together with vtables when needed.
289 if (ForVTable && !Thunk->hasLocalLinkage())
290 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291 CGM.setGVProperties(Thunk, GD);
292 }
293
294 bool exportThunk() override { return true; }
295
296 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297 const ThisAdjustment &TA) override;
298
299 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300 const ReturnAdjustment &RA) override;
301
302 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303 FunctionArgList &Args) const override {
304 assert(!Args.empty() && "expected the arglist to not be empty!");
305 return Args.size() - 1;
306 }
307
308 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309 StringRef GetDeletedVirtualCallName() override
310 { return "__cxa_deleted_virtual"; }
311
312 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313 Address InitializeArrayCookie(CodeGenFunction &CGF,
314 Address NewPtr,
315 llvm::Value *NumElements,
316 const CXXNewExpr *expr,
317 QualType ElementType) override;
318 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319 Address allocPtr,
320 CharUnits cookieSize) override;
321
322 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323 llvm::GlobalVariable *DeclPtr,
324 bool PerformInit) override;
325 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326 llvm::FunctionCallee dtor,
327 llvm::Constant *addr) override;
328
329 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330 llvm::Value *Val);
331 void EmitThreadLocalInitFuncs(
332 CodeGenModule &CGM,
333 ArrayRef<const VarDecl *> CXXThreadLocals,
334 ArrayRef<llvm::Function *> CXXThreadLocalInits,
335 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
336
337 bool usesThreadWrapperFunction(const VarDecl *VD) const override {
338 return !isEmittedWithConstantInitializer(VD) ||
339 mayNeedDestruction(VD);
340 }
341 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
342 QualType LValType) override;
343
344 bool NeedsVTTParameter(GlobalDecl GD) override;
345
346 /**************************** RTTI Uniqueness ******************************/
347
348protected:
349 /// Returns true if the ABI requires RTTI type_info objects to be unique
350 /// across a program.
351 virtual bool shouldRTTIBeUnique() const { return true; }
352
353public:
354 /// What sort of unique-RTTI behavior should we use?
355 enum RTTIUniquenessKind {
356 /// We are guaranteeing, or need to guarantee, that the RTTI string
357 /// is unique.
358 RUK_Unique,
359
360 /// We are not guaranteeing uniqueness for the RTTI string, so we
361 /// can demote to hidden visibility but must use string comparisons.
362 RUK_NonUniqueHidden,
363
364 /// We are not guaranteeing uniqueness for the RTTI string, so we
365 /// have to use string comparisons, but we also have to emit it with
366 /// non-hidden visibility.
367 RUK_NonUniqueVisible
368 };
369
370 /// Return the required visibility status for the given type and linkage in
371 /// the current ABI.
372 RTTIUniquenessKind
373 classifyRTTIUniqueness(QualType CanTy,
374 llvm::GlobalValue::LinkageTypes Linkage) const;
375 friend class ItaniumRTTIBuilder;
376
377 void emitCXXStructor(GlobalDecl GD) override;
378
379 std::pair<llvm::Value *, const CXXRecordDecl *>
380 LoadVTablePtr(CodeGenFunction &CGF, Address This,
381 const CXXRecordDecl *RD) override;
382
383 private:
384 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
385 const auto &VtableLayout =
386 CGM.getItaniumVTableContext().getVTableLayout(RD);
387
388 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
389 // Skip empty slot.
390 if (!VtableComponent.isUsedFunctionPointerKind())
391 continue;
392
393 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
394 if (!Method->getCanonicalDecl()->isInlined())
395 continue;
396
397 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
398 auto *Entry = CGM.GetGlobalValue(Name);
399 // This checks if the virtual inline function has already been emitted.
400 // Note that it is possible that this inline function would be emitted
401 // after trying to emit vtable speculatively. Because of this we do
402 // an extra pass after emitting all deferred vtables to find and emit
403 // these vtables opportunistically.
404 if (!Entry || Entry->isDeclaration())
405 return true;
406 }
407 return false;
408 }
409
410 bool isVTableHidden(const CXXRecordDecl *RD) const {
411 const auto &VtableLayout =
412 CGM.getItaniumVTableContext().getVTableLayout(RD);
413
414 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
415 if (VtableComponent.isRTTIKind()) {
416 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
417 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
418 return true;
419 } else if (VtableComponent.isUsedFunctionPointerKind()) {
420 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
421 if (Method->getVisibility() == Visibility::HiddenVisibility &&
422 !Method->isDefined())
423 return true;
424 }
425 }
426 return false;
427 }
428};
429
430class ARMCXXABI : public ItaniumCXXABI {
431public:
432 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
433 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
434 /*UseARMGuardVarABI=*/true) {}
435
436 bool constructorsAndDestructorsReturnThis() const override { return true; }
437
438 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
439 QualType ResTy) override;
440
441 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
442 Address InitializeArrayCookie(CodeGenFunction &CGF,
443 Address NewPtr,
444 llvm::Value *NumElements,
445 const CXXNewExpr *expr,
446 QualType ElementType) override;
447 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
448 CharUnits cookieSize) override;
449};
450
451class AppleARM64CXXABI : public ARMCXXABI {
452public:
453 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
454 Use32BitVTableOffsetABI = true;
455 }
456
457 // ARM64 libraries are prepared for non-unique RTTI.
458 bool shouldRTTIBeUnique() const override { return false; }
459};
460
461class FuchsiaCXXABI final : public ItaniumCXXABI {
462public:
463 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
464 : ItaniumCXXABI(CGM) {}
465
466private:
467 bool constructorsAndDestructorsReturnThis() const override { return true; }
468};
469
470class WebAssemblyCXXABI final : public ItaniumCXXABI {
471public:
472 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
473 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
474 /*UseARMGuardVarABI=*/true) {}
475 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
476 llvm::CallInst *
477 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
478 llvm::Value *Exn) override;
479
480private:
481 bool constructorsAndDestructorsReturnThis() const override { return true; }
482 bool canCallMismatchedFunctionType() const override { return false; }
483};
484
485class XLCXXABI final : public ItaniumCXXABI {
486public:
487 explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
488 : ItaniumCXXABI(CGM) {}
489
490 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
491 llvm::FunctionCallee dtor,
492 llvm::Constant *addr) override;
493
494 bool useSinitAndSterm() const override { return true; }
495
496private:
497 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
498 llvm::Constant *addr);
499};
500}
501
502CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
503 switch (CGM.getContext().getCXXABIKind()) {
504 // For IR-generation purposes, there's no significant difference
505 // between the ARM and iOS ABIs.
506 case TargetCXXABI::GenericARM:
507 case TargetCXXABI::iOS:
508 case TargetCXXABI::WatchOS:
509 return new ARMCXXABI(CGM);
510
511 case TargetCXXABI::AppleARM64:
512 return new AppleARM64CXXABI(CGM);
513
514 case TargetCXXABI::Fuchsia:
515 return new FuchsiaCXXABI(CGM);
516
517 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
518 // include the other 32-bit ARM oddities: constructor/destructor return values
519 // and array cookies.
520 case TargetCXXABI::GenericAArch64:
521 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
522 /*UseARMGuardVarABI=*/true);
523
524 case TargetCXXABI::GenericMIPS:
525 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
526
527 case TargetCXXABI::WebAssembly:
528 return new WebAssemblyCXXABI(CGM);
529
530 case TargetCXXABI::XL:
531 return new XLCXXABI(CGM);
532
533 case TargetCXXABI::GenericItanium:
534 if (CGM.getContext().getTargetInfo().getTriple().getArch()
535 == llvm::Triple::le32) {
536 // For PNaCl, use ARM-style method pointers so that PNaCl code
537 // does not assume anything about the alignment of function
538 // pointers.
539 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
540 }
541 return new ItaniumCXXABI(CGM);
542
543 case TargetCXXABI::Microsoft:
544 llvm_unreachable("Microsoft ABI is not Itanium-based");
545 }
546 llvm_unreachable("bad ABI kind");
547}
548
549llvm::Type *
550ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
551 if (MPT->isMemberDataPointer())
552 return CGM.PtrDiffTy;
553 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
554}
555
556/// In the Itanium and ARM ABIs, method pointers have the form:
557/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
558///
559/// In the Itanium ABI:
560/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
561/// - the this-adjustment is (memptr.adj)
562/// - the virtual offset is (memptr.ptr - 1)
563///
564/// In the ARM ABI:
565/// - method pointers are virtual if (memptr.adj & 1) is nonzero
566/// - the this-adjustment is (memptr.adj >> 1)
567/// - the virtual offset is (memptr.ptr)
568/// ARM uses 'adj' for the virtual flag because the low bit of a Thumb
569/// function pointer is already used to mark Thumb state.
570///
571/// If the member is virtual, the adjusted 'this' pointer points
572/// to a vtable pointer from which the virtual offset is applied.
573///
574/// If the member is non-virtual, memptr.ptr is the address of
575/// the function to call.
576CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
577 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
578 llvm::Value *&ThisPtrForCall,
579 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
580 CGBuilderTy &Builder = CGF.Builder;
581
582 const FunctionProtoType *FPT =
583 MPT->getPointeeType()->castAs<FunctionProtoType>();
584 auto *RD =
585 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
586
587 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
588 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
589
590 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
591
592 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
593 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
594 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
595
596 // Extract memptr.adj, which is in the second field.
597 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
598
599 // Compute the true adjustment.
600 llvm::Value *Adj = RawAdj;
601 if (UseARMMethodPtrABI)
602 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
603
604 // Apply the adjustment and cast back to the original struct type
605 // for consistency.
606 llvm::Value *This = ThisAddr.getPointer();
607 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
608 Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
609 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
610 ThisPtrForCall = This;
611
612 // Load the function pointer.
613 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
614
615 // If the LSB in the function pointer is 1, the function pointer points to
616 // a virtual function.
617 llvm::Value *IsVirtual;
618 if (UseARMMethodPtrABI)
619 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
620 else
621 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
622 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
623 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
624
625 // In the virtual path, the adjustment left 'This' pointing to the
626 // vtable of the correct base subobject. The "function pointer" is an
627 // offset within the vtable (+1 for the virtual flag on non-ARM).
628 CGF.EmitBlock(FnVirtual);
629
630 // Cast the adjusted this to a pointer to vtable pointer and load.
631 llvm::Type *VTableTy = Builder.getInt8PtrTy();
632 CharUnits VTablePtrAlign =
633 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
634 CGF.getPointerAlign());
635 llvm::Value *VTable = CGF.GetVTablePtr(
636 Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
637
638 // Apply the offset.
639 // On ARM64, to reserve extra space in virtual member function pointers,
640 // we only pay attention to the low 32 bits of the offset.
641 llvm::Value *VTableOffset = FnAsInt;
642 if (!UseARMMethodPtrABI)
643 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
644 if (Use32BitVTableOffsetABI) {
645 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
646 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
647 }
648
649 // Check the address of the function pointer if CFI on member function
650 // pointers is enabled.
651 llvm::Constant *CheckSourceLocation;
652 llvm::Constant *CheckTypeDesc;
653 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
654 CGM.HasHiddenLTOVisibility(RD);
655 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
656 CGM.HasHiddenLTOVisibility(RD);
657 bool ShouldEmitWPDInfo =
658 CGM.getCodeGenOpts().WholeProgramVTables &&
659 // Don't insert type tests if we are forcing public visibility.
660 !CGM.AlwaysHasLTOVisibilityPublic(RD);
661 llvm::Value *VirtualFn = nullptr;
662
663 {
664 CodeGenFunction::SanitizerScope SanScope(&CGF);
665 llvm::Value *TypeId = nullptr;
666 llvm::Value *CheckResult = nullptr;
667
668 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
669 // If doing CFI, VFE or WPD, we will need the metadata node to check
670 // against.
671 llvm::Metadata *MD =
672 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
673 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
674 }
675
676 if (ShouldEmitVFEInfo) {
677 llvm::Value *VFPAddr =
678 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
679
680 // If doing VFE, load from the vtable with a type.checked.load intrinsic
681 // call. Note that we use the GEP to calculate the address to load from
682 // and pass 0 as the offset to the intrinsic. This is because every
683 // vtable slot of the correct type is marked with matching metadata, and
684 // we know that the load must be from one of these slots.
685 llvm::Value *CheckedLoad = Builder.CreateCall(
686 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
687 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
688 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
689 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
690 VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
691 "memptr.virtualfn");
692 } else {
693 // When not doing VFE, emit a normal load, as it allows more
694 // optimisations than type.checked.load.
695 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
696 llvm::Value *VFPAddr =
697 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
698 llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
699 ? llvm::Intrinsic::type_test
700 : llvm::Intrinsic::public_type_test;
701
702 CheckResult = Builder.CreateCall(
703 CGM.getIntrinsic(IID),
704 {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
705 }
706
707 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
708 VirtualFn = CGF.Builder.CreateCall(
709 CGM.getIntrinsic(llvm::Intrinsic::load_relative,
710 {VTableOffset->getType()}),
711 {VTable, VTableOffset});
712 VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
713 } else {
714 llvm::Value *VFPAddr =
715 CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
716 VFPAddr = CGF.Builder.CreateBitCast(
717 VFPAddr, FTy->getPointerTo()->getPointerTo());
718 VirtualFn = CGF.Builder.CreateAlignedLoad(
719 FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
720 "memptr.virtualfn");
721 }
722 }
723 assert(VirtualFn && "Virtual function pointer not created!");
724 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
725 CheckResult) &&
726 "Check result required but not created!");
727
728 if (ShouldEmitCFICheck) {
729 // If doing CFI, emit the check.
730 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
731 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
732 llvm::Constant *StaticData[] = {
733 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
734 CheckSourceLocation,
735 CheckTypeDesc,
736 };
737
738 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
739 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
740 } else {
741 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
742 CGM.getLLVMContext(),
743 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
744 llvm::Value *ValidVtable = Builder.CreateCall(
745 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
746 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
747 SanitizerHandler::CFICheckFail, StaticData,
748 {VTable, ValidVtable});
749 }
750
751 FnVirtual = Builder.GetInsertBlock();
752 }
753 } // End of sanitizer scope
754
755 CGF.EmitBranch(FnEnd);
756
757 // In the non-virtual path, the 'ptr' field really is a plain
758 // function pointer.
759 CGF.EmitBlock(FnNonVirtual);
760 llvm::Value *NonVirtualFn =
761 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
762
763 // Check the function pointer if CFI on member function pointers is enabled.
764 if (ShouldEmitCFICheck) {
766 if (RD->hasDefinition()) {
767 CodeGenFunction::SanitizerScope SanScope(&CGF);
768
769 llvm::Constant *StaticData[] = {
770 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
771 CheckSourceLocation,
772 CheckTypeDesc,
773 };
774
775 llvm::Value *Bit = Builder.getFalse();
776 llvm::Value *CastedNonVirtualFn =
777 Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
778 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
779 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
780 getContext().getMemberPointerType(
781 MPT->getPointeeType(),
782 getContext().getRecordType(Base).getTypePtr()));
783 llvm::Value *TypeId =
784 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
785
786 llvm::Value *TypeTest =
787 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
788 {CastedNonVirtualFn, TypeId});
789 Bit = Builder.CreateOr(Bit, TypeTest);
790 }
791
792 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
793 SanitizerHandler::CFICheckFail, StaticData,
794 {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
795
796 FnNonVirtual = Builder.GetInsertBlock();
797 }
798 }
799
800 // We're done.
801 CGF.EmitBlock(FnEnd);
802 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
803 CalleePtr->addIncoming(VirtualFn, FnVirtual);
804 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
805
806 CGCallee Callee(FPT, CalleePtr);
807 return Callee;
808}
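// Illustrative sketch (editorial addition, not part of the upstream file):
// the two encodings documented above EmitLoadOfMemberFunctionPointer can be
// decoded in plain C++ roughly as follows. The struct and helper names are
// hypothetical.
//
//   struct MemFnRep { ptrdiff_t ptr; ptrdiff_t adj; };
//
//   bool isVirtual(const MemFnRep &MP, bool UseARMABI) {
//     // Itanium keeps the flag in the low bit of 'ptr'; ARM keeps it in 'adj'.
//     return ((UseARMABI ? MP.adj : MP.ptr) & 1) != 0;
//   }
//   ptrdiff_t thisAdjustment(const MemFnRep &MP, bool UseARMABI) {
//     // ARM stores twice the adjustment so the low bit stays free.
//     return UseARMABI ? (MP.adj >> 1) : MP.adj;
//   }
//   ptrdiff_t virtualTableOffset(const MemFnRep &MP, bool UseARMABI) {
//     // Itanium stores the vtable offset plus one; ARM stores it directly.
//     return UseARMABI ? MP.ptr : (MP.ptr - 1);
//   }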
809
810/// Compute an l-value by applying the given pointer-to-member to a
811/// base object.
812llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
813 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
814 const MemberPointerType *MPT) {
815 assert(MemPtr->getType() == CGM.PtrDiffTy);
816
817 CGBuilderTy &Builder = CGF.Builder;
818
819 // Cast to char*.
820 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
821
822 // Apply the offset, which we assume is non-null.
823 llvm::Value *Addr = Builder.CreateInBoundsGEP(
824 Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
825
826 // Cast the address to the appropriate pointer type, adopting the
827 // address space of the base pointer.
828 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
829 ->getPointerTo(Base.getAddressSpace());
830 return Builder.CreateBitCast(Addr, PType);
831}
832
833/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
834/// conversion.
835///
836/// Bitcast conversions are always a no-op under Itanium.
837///
838/// Obligatory offset/adjustment diagram:
839/// <-- offset --> <-- adjustment -->
840/// |--------------------------|----------------------|--------------------|
841/// ^Derived address point ^Base address point ^Member address point
842///
843/// So when converting a base member pointer to a derived member pointer,
844/// we add the offset to the adjustment because the address point has
845/// decreased; and conversely, when converting a derived MP to a base MP
846/// we subtract the offset from the adjustment because the address point
847/// has increased.
848///
849/// The standard forbids (at compile time) conversion to and from
850/// virtual bases, which is why we don't have to consider them here.
851///
852/// The standard forbids (at run time) casting a derived MP to a base
853/// MP when the derived MP does not point to a member of the base.
854/// This is why -1 is a reasonable choice for null data member
855/// pointers.
856llvm::Value *
857ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
858 const CastExpr *E,
859 llvm::Value *src) {
860 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
861 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
862 E->getCastKind() == CK_ReinterpretMemberPointer);
863
864 // Under Itanium, reinterprets don't require any additional processing.
865 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
866
867 // Use constant emission if we can.
868 if (isa<llvm::Constant>(src))
869 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
870
871 llvm::Constant *adj = getMemberPointerAdjustment(E);
872 if (!adj) return src;
873
874 CGBuilderTy &Builder = CGF.Builder;
875 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
876
877 const MemberPointerType *destTy =
878 E->getType()->castAs<MemberPointerType>();
879
880 // For member data pointers, this is just a matter of adding the
881 // offset if the source is non-null.
882 if (destTy->isMemberDataPointer()) {
883 llvm::Value *dst;
884 if (isDerivedToBase)
885 dst = Builder.CreateNSWSub(src, adj, "adj");
886 else
887 dst = Builder.CreateNSWAdd(src, adj, "adj");
888
889 // Null check.
890 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
891 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
892 return Builder.CreateSelect(isNull, src, dst);
893 }
894
895 // The this-adjustment is left-shifted by 1 on ARM.
896 if (UseARMMethodPtrABI) {
897 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
898 offset <<= 1;
899 adj = llvm::ConstantInt::get(adj->getType(), offset);
900 }
901
902 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
903 llvm::Value *dstAdj;
904 if (isDerivedToBase)
905 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
906 else
907 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
908
909 return Builder.CreateInsertValue(src, dstAdj, 1);
910}
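// Worked example (editorial, illustrative only) of the data-member-pointer
// arithmetic above, assuming a typical layout where 'int' is 4 bytes:
//
//   struct A { int a; };
//   struct B { int b; };
//   struct C : A, B {};
//
//   int B::*pb = &B::b;        // represented as offset 0 within B
//   int C::*pc = pb;           // base-to-derived: add the offset of B in C
//                              // (4 here), giving representation 4
//   int B::*null_b = nullptr;  // represented as -1; the all-ones null check
//                              // above passes it through unchanged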
911
912llvm::Constant *
913ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
914 llvm::Constant *src) {
915 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
916 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
917 E->getCastKind() == CK_ReinterpretMemberPointer);
918
919 // Under Itanium, reinterprets don't require any additional processing.
920 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
921
922 // If the adjustment is trivial, we don't need to do anything.
923 llvm::Constant *adj = getMemberPointerAdjustment(E);
924 if (!adj) return src;
925
926 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
927
928 const MemberPointerType *destTy =
929 E->getType()->castAs<MemberPointerType>();
930
931 // For member data pointers, this is just a matter of adding the
932 // offset if the source is non-null.
933 if (destTy->isMemberDataPointer()) {
934 // null maps to null.
935 if (src->isAllOnesValue()) return src;
936
937 if (isDerivedToBase)
938 return llvm::ConstantExpr::getNSWSub(src, adj);
939 else
940 return llvm::ConstantExpr::getNSWAdd(src, adj);
941 }
942
943 // The this-adjustment is left-shifted by 1 on ARM.
944 if (UseARMMethodPtrABI) {
945 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
946 offset <<= 1;
947 adj = llvm::ConstantInt::get(adj->getType(), offset);
948 }
949
950 llvm::Constant *srcAdj = src->getAggregateElement(1);
951 llvm::Constant *dstAdj;
952 if (isDerivedToBase)
953 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
954 else
955 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
956
957 llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
958 assert(res != nullptr && "Folding must succeed");
959 return res;
960}
961
962llvm::Constant *
963ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
964 // Itanium C++ ABI 2.3:
965 // A NULL pointer is represented as -1.
966 if (MPT->isMemberDataPointer())
967 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
968
969 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
970 llvm::Constant *Values[2] = { Zero, Zero };
971 return llvm::ConstantStruct::getAnon(Values);
972}
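// For example (editorial note): given 'struct S { int x; void f(); };',
//   int S::*dp = nullptr;      // emitted as ptrdiff_t -1
//   void (S::*fp)() = nullptr; // emitted as the pair { 0, 0 }
// A data member pointer cannot use 0 as its null value because 0 is the
// valid offset of S::x.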
973
974llvm::Constant *
975ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
976 CharUnits offset) {
977 // Itanium C++ ABI 2.3:
978 // A pointer to data member is an offset from the base address of
979 // the class object containing it, represented as a ptrdiff_t
980 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
981}
982
983llvm::Constant *
984ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
985 return BuildMemberPointer(MD, CharUnits::Zero());
986}
987
988llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
989 CharUnits ThisAdjustment) {
990 assert(MD->isInstance() && "Member function must not be static!");
991
992 CodeGenTypes &Types = CGM.getTypes();
993
994 // Get the function pointer (or index if this is a virtual function).
995 llvm::Constant *MemPtr[2];
996 if (MD->isVirtual()) {
997 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
998 uint64_t VTableOffset;
999 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1000 // Multiply by 4-byte relative offsets.
1001 VTableOffset = Index * 4;
1002 } else {
1003 const ASTContext &Context = getContext();
1004 CharUnits PointerWidth = Context.toCharUnitsFromBits(
1005 Context.getTargetInfo().getPointerWidth(LangAS::Default));
1006 VTableOffset = Index * PointerWidth.getQuantity();
1007 }
1008
1009 if (UseARMMethodPtrABI) {
1010 // ARM C++ ABI 3.2.1:
1011 // This ABI specifies that adj contains twice the this
1012 // adjustment, plus 1 if the member function is virtual. The
1013 // least significant bit of adj then makes exactly the same
1014 // discrimination as the least significant bit of ptr does for
1015 // Itanium.
1016 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1017 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1018 2 * ThisAdjustment.getQuantity() + 1);
1019 } else {
1020 // Itanium C++ ABI 2.3:
1021 // For a virtual function, [the pointer field] is 1 plus the
1022 // virtual table offset (in bytes) of the function,
1023 // represented as a ptrdiff_t.
1024 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1025 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1026 ThisAdjustment.getQuantity());
1027 }
1028 } else {
1029 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1030 llvm::Type *Ty;
1031 // Check whether the function has a computable LLVM signature.
1032 if (Types.isFuncTypeConvertible(FPT)) {
1033 // The function has a computable LLVM signature; use the correct type.
1034 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1035 } else {
1036 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1037 // function type is incomplete.
1038 Ty = CGM.PtrDiffTy;
1039 }
1040 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
1041
1042 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1043 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1044 (UseARMMethodPtrABI ? 2 : 1) *
1045 ThisAdjustment.getQuantity());
1046 }
1047
1048 return llvm::ConstantStruct::getAnon(MemPtr);
1049}
1050
1051llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1052 QualType MPType) {
1053 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1054 const ValueDecl *MPD = MP.getMemberPointerDecl();
1055 if (!MPD)
1056 return EmitNullMemberPointer(MPT);
1057
1058 CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1059
1060 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1061 return BuildMemberPointer(MD, ThisAdjustment);
1062
1063 CharUnits FieldOffset =
1064 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1065 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1066}
1067
1068/// The comparison algorithm is pretty easy: the member pointers are
1069/// the same if they're either bitwise identical *or* both null.
1070///
1071/// ARM is different here only because null-ness is more complicated.
1072llvm::Value *
1073ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
1074 llvm::Value *L,
1075 llvm::Value *R,
1076 const MemberPointerType *MPT,
1077 bool Inequality) {
1078 CGBuilderTy &Builder = CGF.Builder;
1079
1080 llvm::ICmpInst::Predicate Eq;
1081 llvm::Instruction::BinaryOps And, Or;
1082 if (Inequality) {
1083 Eq = llvm::ICmpInst::ICMP_NE;
1084 And = llvm::Instruction::Or;
1085 Or = llvm::Instruction::And;
1086 } else {
1087 Eq = llvm::ICmpInst::ICMP_EQ;
1088 And = llvm::Instruction::And;
1089 Or = llvm::Instruction::Or;
1090 }
1091
1092 // Member data pointers are easy because there's a unique null
1093 // value, so it just comes down to bitwise equality.
1094 if (MPT->isMemberDataPointer())
1095 return Builder.CreateICmp(Eq, L, R);
1096
1097 // For member function pointers, the tautologies are more complex.
1098 // The Itanium tautology is:
1099 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1100 // The ARM tautology is:
1101 // (L == R) <==> (L.ptr == R.ptr &&
1102 // (L.adj == R.adj ||
1103 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1104 // The inequality tautologies have exactly the same structure, except
1105 // applying De Morgan's laws.
1106
1107 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1108 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1109
1110 // This condition tests whether L.ptr == R.ptr. This must always be
1111 // true for equality to hold.
1112 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1113
1114 // This condition, together with the assumption that L.ptr == R.ptr,
1115 // tests whether the pointers are both null. ARM imposes an extra
1116 // condition.
1117 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1118 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1119
1120 // This condition tests whether L.adj == R.adj. If this isn't
1121 // true, the pointers are unequal unless they're both null.
1122 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1123 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1124 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1125
1126 // Null member function pointers on ARM clear the low bit of Adj,
1127 // so the zero condition has to check that neither low bit is set.
1128 if (UseARMMethodPtrABI) {
1129 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1130
1131 // Compute (l.adj | r.adj) & 1 and test it against zero.
1132 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1133 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1134 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1135 "cmp.or.adj");
1136 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1137 }
1138
1139 // Tie together all our conditions.
1140 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1141 Result = Builder.CreateBinOp(And, PtrEq, Result,
1142 Inequality ? "memptr.ne" : "memptr.eq");
1143 return Result;
1144}
1145
1146llvm::Value *
1147ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1148 llvm::Value *MemPtr,
1149 const MemberPointerType *MPT) {
1150 CGBuilderTy &Builder = CGF.Builder;
1151
1152 /// For member data pointers, this is just a check against -1.
1153 if (MPT->isMemberDataPointer()) {
1154 assert(MemPtr->getType() == CGM.PtrDiffTy);
1155 llvm::Value *NegativeOne =
1156 llvm::Constant::getAllOnesValue(MemPtr->getType());
1157 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1158 }
1159
1160 // In Itanium, a member function pointer is not null if 'ptr' is not null.
1161 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1162
1163 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1164 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1165
1166 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1167 // (the virtual bit) is set.
1168 if (UseARMMethodPtrABI) {
1169 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1170 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1171 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1172 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1173 "memptr.isvirtual");
1174 Result = Builder.CreateOr(Result, IsVirtual);
1175 }
1176
1177 return Result;
1178}
1179
1180bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1181 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1182 if (!RD)
1183 return false;
1184
1185 // If C++ prohibits us from making a copy, return by address.
1186 if (!RD->canPassInRegisters()) {
1187 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1188 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1189 return true;
1190 }
1191 return false;
1192}
1193
1194/// The Itanium ABI requires non-zero initialization only for data
1195/// member pointers, for which '0' is a valid offset.
1196bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1197 return MPT->isMemberFunctionPointer();
1198}
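// For instance (editorial note): with 'struct S { int first; };', the member
// pointer '&S::first' is represented as offset 0, so zero-filled memory would
// be indistinguishable from it. Data member pointers therefore need the
// explicit all-ones (-1) pattern and are not zero-initializable.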
1199
1200/// The Itanium ABI always places an offset to the complete object
1201/// at entry -2 in the vtable.
1202void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1203 const CXXDeleteExpr *DE,
1204 Address Ptr,
1205 QualType ElementType,
1206 const CXXDestructorDecl *Dtor) {
1207 bool UseGlobalDelete = DE->isGlobalDelete();
1208 if (UseGlobalDelete) {
1209 // Derive the complete-object pointer, which is what we need
1210 // to pass to the deallocation function.
1211
1212 // Grab the vtable pointer as an intptr_t*.
1213 auto *ClassDecl =
1214 cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1215 llvm::Value *VTable =
1216 CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1217
1218 // Track back to entry -2 and pull out the offset there.
1219 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1220 CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
1221 llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr, CGF.getPointerAlign());
1222
1223 // Apply the offset.
1224 llvm::Value *CompletePtr =
1225 CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1226 CompletePtr =
1227 CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
1228
1229 // If we're supposed to call the global delete, make sure we do so
1230 // even if the destructor throws.
1231 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1232 ElementType);
1233 }
1234
1235 // FIXME: Provide a source location here even though there's no
1236 // CXXMemberCallExpr for dtor call.
1237 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1238 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1239
1240 if (UseGlobalDelete)
1241 CGF.PopCleanupBlock();
1242}
1243
1244void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1245 // void __cxa_rethrow();
1246
1247 llvm::FunctionType *FTy =
1248 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1249
1250 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1251
1252 if (isNoReturn)
1253 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, std::nullopt);
1254 else
1255 CGF.EmitRuntimeCallOrInvoke(Fn);
1256}
1257
1258static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1259 // void *__cxa_allocate_exception(size_t thrown_size);
1260
1261 llvm::FunctionType *FTy =
1262 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1263
1264 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1265}
1266
1267static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1268 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1269 // void (*dest) (void *));
1270
1271 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1272 llvm::FunctionType *FTy =
1273 llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1274
1275 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1276}
1277
1278void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1279 QualType ThrowType = E->getSubExpr()->getType();
1280 // Now allocate the exception object.
1281 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1282 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1283
1284 llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1285 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1286 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1287
1288 CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1289 CGF.EmitAnyExprToExn(
1290 E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
1291
1292 // Now throw the exception.
1293 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1294 /*ForEH=*/true);
1295
1296 // The address of the destructor. If the exception type has a
1297 // trivial destructor (or isn't a record), we just pass null.
1298 llvm::Constant *Dtor = nullptr;
1299 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1300 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1301 if (!Record->hasTrivialDestructor()) {
1302 CXXDestructorDecl *DtorD = Record->getDestructor();
1303 Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1304 Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1305 }
1306 }
1307 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1308
1309 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1310 CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1311}
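// Conceptually (editorial sketch mirroring the code above), a C++ 'throw' of
// a class type T lowers to the Itanium EH runtime calls:
//
//   void *exn = __cxa_allocate_exception(sizeof(T));
//   /* construct the T object into 'exn' */
//   __cxa_throw(exn, &typeid(T),
//               /* &T::~T (complete-object variant), or null if trivial */);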
1312
1313static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1314 // void *__dynamic_cast(const void *sub,
1315 // const abi::__class_type_info *src,
1316 // const abi::__class_type_info *dst,
1317 // std::ptrdiff_t src2dst_offset);
1318
1319 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1320 llvm::Type *PtrDiffTy =
1321 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1322
1323 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1324
1325 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1326
1327 // Mark the function as nounwind readonly.
1328 llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
1329 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1330 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
1331 llvm::AttributeList Attrs = llvm::AttributeList::get(
1332 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1333
1334 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1335}
1336
1337static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1338 // void __cxa_bad_cast();
1339 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1340 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1341}
1342
1343/// Compute the src2dst_offset hint as described in the
1344/// Itanium C++ ABI [2.9.7]
1345static CharUnits computeOffsetHint(ASTContext &Context,
1346 const CXXRecordDecl *Src,
1347 const CXXRecordDecl *Dst) {
1348 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1349 /*DetectVirtual=*/false);
1350
1351 // If Dst is not derived from Src we can skip the whole computation below and
1352 // return that Src is not a public base of Dst. Record all inheritance paths.
1353 if (!Dst->isDerivedFrom(Src, Paths))
1354 return CharUnits::fromQuantity(-2ULL);
1355
1356 unsigned NumPublicPaths = 0;
1357 CharUnits Offset;
1358
1359 // Now walk all possible inheritance paths.
1360 for (const CXXBasePath &Path : Paths) {
1361 if (Path.Access != AS_public) // Ignore non-public inheritance.
1362 continue;
1363
1364 ++NumPublicPaths;
1365
1366 for (const CXXBasePathElement &PathElement : Path) {
1367 // If the path contains a virtual base class we can't give any hint.
1368 // -1: no hint.
1369 if (PathElement.Base->isVirtual())
1370 return CharUnits::fromQuantity(-1ULL);
1371
1372 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1373 continue;
1374
1375 // Accumulate the base class offsets.
1376 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1377 Offset += L.getBaseClassOffset(
1378 PathElement.Base->getType()->getAsCXXRecordDecl());
1379 }
1380 }
1381
1382 // -2: Src is not a public base of Dst.
1383 if (NumPublicPaths == 0)
1384 return CharUnits::fromQuantity(-2ULL);
1385
1386 // -3: Src is a multiple public base type but never a virtual base type.
1387 if (NumPublicPaths > 1)
1388 return CharUnits::fromQuantity(-3ULL);
1389
1390 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1391 // Return the offset of Src from the origin of Dst.
1392 return Offset;
1393}
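// Worked examples (editorial, illustrative only) of the hint values computed
// above, assuming single inheritance places the base subobject at offset 0:
//
//   struct S { virtual ~S(); };
//   struct D1 : S {};                  // hint = 0  (offset of S within D1)
//   struct D2 : private S {};          // hint = -2 (no public path)
//   struct D3 : virtual S {};          // hint = -1 (virtual base on the path)
//   struct M1 : S {}; struct M2 : S {};
//   struct D4 : M1, M2 {};             // hint = -3 (S reachable along
//                                      //  multiple public paths)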
1394
1395static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1396 // void __cxa_bad_typeid();
1397 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1398
1399 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1400}
1401
1402bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1403 QualType SrcRecordTy) {
1404 return IsDeref;
1405}
1406
1407void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1408 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1409 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1410 Call->setDoesNotReturn();
1411 CGF.Builder.CreateUnreachable();
1412}
1413
1414llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1415 QualType SrcRecordTy,
1416 Address ThisPtr,
1417 llvm::Type *StdTypeInfoPtrTy) {
1418 auto *ClassDecl =
1419 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1420 llvm::Value *Value =
1421 CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1422
1423 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1424 // Load the type info.
1425 Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
1426 Value = CGF.Builder.CreateCall(
1427 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1428 {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
1429
1430 // Setup to dereference again since this is a proxy we accessed.
1431 Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
1432 } else {
1433 // Load the type info.
1434 Value =
1435 CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
1436 }
1437 return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
1438 CGF.getPointerAlign());
1439}
1440
1441bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1442 QualType SrcRecordTy) {
1443 return SrcIsPtr;
1444}
1445
1446llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1447 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1448 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1449 llvm::Type *PtrDiffLTy =
1450 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1451 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1452
1453 llvm::Value *SrcRTTI =
1454 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1455 llvm::Value *DestRTTI =
1456 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1457
1458 // Compute the offset hint.
1459 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1460 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1461 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1462 PtrDiffLTy,
1463 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1464
1465 // Emit the call to __dynamic_cast.
1466 llvm::Value *Value = ThisAddr.getPointer();
1468
1469 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1470 Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1471 Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1472
1473 /// C++ [expr.dynamic.cast]p9:
1474 /// A failed cast to reference type throws std::bad_cast
1475 if (DestTy->isReferenceType()) {
1476 llvm::BasicBlock *BadCastBlock =
1477 CGF.createBasicBlock("dynamic_cast.bad_cast");
1478
1479 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1480 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1481
1482 CGF.EmitBlock(BadCastBlock);
1483 EmitBadCastCall(CGF);
1484 }
1485
1486 return Value;
1487}
1488
1489llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1490 Address ThisAddr,
1491 QualType SrcRecordTy,
1492 QualType DestTy) {
1493 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1494 auto *ClassDecl =
1495 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1496 llvm::Value *OffsetToTop;
1497 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1498 // Get the vtable pointer.
1499 llvm::Value *VTable =
1500 CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
1501
1502 // Get the offset-to-top from the vtable.
1503 OffsetToTop =
1504 CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
1505 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1506 CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1507 } else {
1508 llvm::Type *PtrDiffLTy =
1509 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1510
1511 // Get the vtable pointer.
1512 llvm::Value *VTable =
1513 CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
1514
1515 // Get the offset-to-top from the vtable.
1516 OffsetToTop =
1517 CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
1518 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1519 PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1520 }
1521 // Finally, add the offset to the pointer.
1522 llvm::Value *Value = ThisAddr.getPointer();
1524 Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
1525 return CGF.Builder.CreateBitCast(Value, DestLTy);
1526}
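// In other words (editorial note): 'dynamic_cast<void*>(p)' never calls the
// runtime; it loads the offset-to-top slot stored two entries before the
// address point of p's vtable (a 32-bit entry under the relative ABI, a
// ptrdiff_t entry otherwise) and adds it to p to reach the most-derived
// object.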
1527
1528bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1529 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1530 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1531 Call->setDoesNotReturn();
1532 CGF.Builder.CreateUnreachable();
1533 return true;
1534}
1535
1536llvm::Value *
1537ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1538 Address This,
1539 const CXXRecordDecl *ClassDecl,
1540 const CXXRecordDecl *BaseClassDecl) {
1541 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1542 CharUnits VBaseOffsetOffset =
1543 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1544 BaseClassDecl);
1545 llvm::Value *VBaseOffsetPtr =
1546 CGF.Builder.CreateConstGEP1_64(
1547 CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
1548 "vbase.offset.ptr");
1549
1550 llvm::Value *VBaseOffset;
1551 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1552 VBaseOffsetPtr =
1553 CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
1554 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1555 CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
1556 "vbase.offset");
1557 } else {
1558 VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1559 CGM.PtrDiffTy->getPointerTo());
1560 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1561 CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
1562 }
1563 return VBaseOffset;
1564}
1565
1566void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1567 // Just make sure we're in sync with TargetCXXABI.
1568 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1569
1570 // The constructor used for constructing this as a base class;
1571 // ignores virtual bases.
1572 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1573
1574 // The constructor used for constructing this as a complete class;
1575 // constructs the virtual bases, then calls the base constructor.
1576 if (!D->getParent()->isAbstract()) {
1577 // We don't need to emit the complete ctor if the class is abstract.
1578 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1579 }
1580}
1581
1582CGCXXABI::AddedStructorArgCounts
1583ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1584                                      SmallVectorImpl<CanQualType> &ArgTys) {
1585  ASTContext &Context = getContext();
1586
1587 // All parameters are already in place except VTT, which goes after 'this'.
1588 // These are Clang types, so we don't need to worry about sret yet.
1589
1590 // Check if we need to add a VTT parameter (which has type void **).
1591 if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1592 : GD.getDtorType() == Dtor_Base) &&
1593 cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1594 ArgTys.insert(ArgTys.begin() + 1,
1595 Context.getPointerType(Context.VoidPtrTy));
1596 return AddedStructorArgCounts::prefix(1);
1597 }
1598 return AddedStructorArgCounts{};
1599}
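// Illustrative sketch (hypothetical class names): given
//   struct VB { };  struct D : virtual VB { D(); };
// the base-object ("C2") constructor gains an implicit VTT parameter right
// after 'this', so its effective signature is roughly
//   void D::D(D *this, void **vtt);
// while the complete-object ("C1") constructor keeps the unmodified signature.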
1600
1601void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1602 // The destructor used for destructing this as a base class; ignores
1603 // virtual bases.
1604 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1605
1606 // The destructor used for destructing this as a most-derived class;
1607  // calls the base destructor and then destructs any virtual bases.
1608 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1609
1610 // The destructor in a virtual table is always a 'deleting'
1611 // destructor, which calls the complete destructor and then uses the
1612 // appropriate operator delete.
1613 if (D->isVirtual())
1614 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1615}
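// Illustrative sketch (typical Itanium manglings for a hypothetical class S
// with a virtual destructor): the variants emitted above are
//   _ZN1SD2Ev  // base-object destructor, ignores virtual bases
//   _ZN1SD1Ev  // complete-object destructor, also destroys virtual bases
//   _ZN1SD0Ev  // deleting destructor, calls D1 and then operator delete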
1616
1617void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1618 QualType &ResTy,
1619 FunctionArgList &Params) {
1620 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1621 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1622
1623 // Check if we need a VTT parameter as well.
1624 if (NeedsVTTParameter(CGF.CurGD)) {
1625 ASTContext &Context = getContext();
1626
1627 // FIXME: avoid the fake decl
1628 QualType T = Context.getPointerType(Context.VoidPtrTy);
1629 auto *VTTDecl = ImplicitParamDecl::Create(
1630 Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1631        T, ImplicitParamDecl::CXXVTT);
1632    Params.insert(Params.begin() + 1, VTTDecl);
1633 getStructorImplicitParamDecl(CGF) = VTTDecl;
1634 }
1635}
1636
1637void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1638 // Naked functions have no prolog.
1639 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1640 return;
1641
1642 /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1643 /// adjustments are required, because they are all handled by thunks.
1644 setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1645
1646 /// Initialize the 'vtt' slot if needed.
1647 if (getStructorImplicitParamDecl(CGF)) {
1648 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1649 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1650 }
1651
1652 /// If this is a function that the ABI specifies returns 'this', initialize
1653 /// the return slot to 'this' at the start of the function.
1654 ///
1655 /// Unlike the setting of return types, this is done within the ABI
1656 /// implementation instead of by clients of CGCXXABI because:
1657 /// 1) getThisValue is currently protected
1658 /// 2) in theory, an ABI could implement 'this' returns some other way;
1659 /// HasThisReturn only specifies a contract, not the implementation
1660 if (HasThisReturn(CGF.CurGD))
1661 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1662}
1663
1664CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1665    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1666    bool ForVirtualBase, bool Delegating) {
1667 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1668 return AddedStructorArgs{};
1669
1670 // Insert the implicit 'vtt' argument as the second argument.
1671 llvm::Value *VTT =
1672 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1673 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1674 return AddedStructorArgs::prefix({{VTT, VTTTy}});
1675}
1676
1677llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1678    CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1679    bool ForVirtualBase, bool Delegating) {
1680 GlobalDecl GD(DD, Type);
1681 return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1682}
1683
1684void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1685 const CXXDestructorDecl *DD,
1686 CXXDtorType Type, bool ForVirtualBase,
1687 bool Delegating, Address This,
1688 QualType ThisTy) {
1689 GlobalDecl GD(DD, Type);
1690 llvm::Value *VTT =
1691 getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1692 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1693
1694  CGCallee Callee;
1695  if (getContext().getLangOpts().AppleKext &&
1696      Type != Dtor_Base && DD->isVirtual())
1697    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1698  else
1699 Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1700
1701 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1702 nullptr);
1703}
1704
1705void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1706 const CXXRecordDecl *RD) {
1707 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1708 if (VTable->hasInitializer())
1709 return;
1710
1711 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1712 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1713 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1714 llvm::Constant *RTTI =
1715 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1716
1717 // Create and set the initializer.
1718 ConstantInitBuilder builder(CGM);
1719 auto components = builder.beginStruct();
1720 CGVT.createVTableInitializer(components, VTLayout, RTTI,
1721 llvm::GlobalValue::isLocalLinkage(Linkage));
1722 components.finishAndSetAsInitializer(VTable);
1723
1724 // Set the correct linkage.
1725 VTable->setLinkage(Linkage);
1726
1727 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1728 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1729
1730 // Set the right visibility.
1731 CGM.setGVProperties(VTable, RD);
1732
1733 // If this is the magic class __cxxabiv1::__fundamental_type_info,
1734 // we will emit the typeinfo for the fundamental types. This is the
1735 // same behaviour as GCC.
1736 const DeclContext *DC = RD->getDeclContext();
1737 if (RD->getIdentifier() &&
1738 RD->getIdentifier()->isStr("__fundamental_type_info") &&
1739 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1740 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1741      DC->getParent()->isTranslationUnit())
1742    EmitFundamentalRTTIDescriptors(RD);
1743
1744 // Always emit type metadata on non-available_externally definitions, and on
1745 // available_externally definitions if we are performing whole program
1746 // devirtualization. For WPD we need the type metadata on all vtable
1747 // definitions to ensure we associate derived classes with base classes
1748 // defined in headers but with a strong definition only in a shared library.
1749 if (!VTable->isDeclarationForLinker() ||
1750 CGM.getCodeGenOpts().WholeProgramVTables) {
1751 CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
1752 // For available_externally definitions, add the vtable to
1753 // @llvm.compiler.used so that it isn't deleted before whole program
1754 // analysis.
1755 if (VTable->isDeclarationForLinker()) {
1756 assert(CGM.getCodeGenOpts().WholeProgramVTables);
1757 CGM.addCompilerUsedGlobal(VTable);
1758 }
1759 }
1760
1761 if (VTContext.isRelativeLayout()) {
1762 CGVT.RemoveHwasanMetadata(VTable);
1763 if (!VTable->isDSOLocal())
1764 CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
1765 }
1766}
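// Illustrative sketch (approximate IR, hypothetical class S with one virtual
// function f): the vtable group built above looks roughly like
//   @_ZTV1S = unnamed_addr constant { [3 x ptr] }
//       { [3 x ptr] [ ptr null,         ; offset-to-top
//                     ptr @_ZTI1S,      ; RTTI descriptor
//                     ptr @_ZN1S1fEv ] }
// with one inner array per vtable in the group when virtual bases are present.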
1767
1768bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1769 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1770 if (Vptr.NearestVBase == nullptr)
1771 return false;
1772 return NeedsVTTParameter(CGF.CurGD);
1773}
1774
1775llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1776 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1777 const CXXRecordDecl *NearestVBase) {
1778
1779 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1780 NeedsVTTParameter(CGF.CurGD)) {
1781 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1782 NearestVBase);
1783 }
1784 return getVTableAddressPoint(Base, VTableClass);
1785}
1786
1787llvm::Constant *
1788ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1789 const CXXRecordDecl *VTableClass) {
1790 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1791
1792 // Find the appropriate vtable within the vtable group, and the address point
1793 // within that vtable.
1794  VTableLayout::AddressPointLocation AddressPoint =
1795      CGM.getItaniumVTableContext()
1796 .getVTableLayout(VTableClass)
1797 .getAddressPoint(Base);
1798 llvm::Value *Indices[] = {
1799 llvm::ConstantInt::get(CGM.Int32Ty, 0),
1800 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1801 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1802 };
1803
1804 return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1805 Indices, /*InBounds=*/true,
1806 /*InRangeIndex=*/1);
1807}
1808
1809// Check whether all the non-inline virtual methods for the class have the
1810// specified attribute.
1811template <typename T>
1812static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1813  bool FoundNonInlineVirtualMethodWithAttr = false;
1814 for (const auto *D : RD->noload_decls()) {
1815 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1816 if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1817 FD->doesThisDeclarationHaveABody())
1818 continue;
1819 if (!D->hasAttr<T>())
1820 return false;
1821 FoundNonInlineVirtualMethodWithAttr = true;
1822 }
1823 }
1824
1825 // We didn't find any non-inline virtual methods missing the attribute. We
1826 // will return true when we found at least one non-inline virtual with the
1827 // attribute. (This lets our caller know that the attribute needs to be
1828 // propagated up to the vtable.)
1829 return FoundNonInlineVirtualMethodWithAttr;
1830}
1831
1832llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1833 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1834 const CXXRecordDecl *NearestVBase) {
1835 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1836 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1837
1838 // Get the secondary vpointer index.
1839 uint64_t VirtualPointerIndex =
1840 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1841
1842 /// Load the VTT.
1843 llvm::Value *VTT = CGF.LoadCXXVTT();
1844 if (VirtualPointerIndex)
1845 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
1846 CGF.VoidPtrTy, VTT, VirtualPointerIndex);
1847
1848 // And load the address point from the VTT.
1849 return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
1850 CGF.getPointerAlign());
1851}
1852
1853llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1854 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1855 return getVTableAddressPoint(Base, VTableClass);
1856}
1857
1858llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1859 CharUnits VPtrOffset) {
1860 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1861
1862 llvm::GlobalVariable *&VTable = VTables[RD];
1863 if (VTable)
1864 return VTable;
1865
1866 // Queue up this vtable for possible deferred emission.
1867 CGM.addDeferredVTable(RD);
1868
1869 SmallString<256> Name;
1870 llvm::raw_svector_ostream Out(Name);
1871 getMangleContext().mangleCXXVTable(RD, Out);
1872
1873 const VTableLayout &VTLayout =
1874 CGM.getItaniumVTableContext().getVTableLayout(RD);
1875 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1876
1877 // Use pointer alignment for the vtable. Otherwise we would align them based
1878 // on the size of the initializer which doesn't make sense as only single
1879 // values are read.
1880 unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
1881 ? 32
1882 : CGM.getTarget().getPointerAlign(LangAS::Default);
1883
1884 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1885 Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1886 getContext().toCharUnitsFromBits(PAlign).getAsAlign());
1887 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1888
1889 // In MS C++ if you have a class with virtual functions in which you are using
1890 // selective member import/export, then all virtual functions must be exported
1891 // unless they are inline, otherwise a link error will result. To match this
1892 // behavior, for such classes, we dllimport the vtable if it is defined
1893 // externally and all the non-inline virtual methods are marked dllimport, and
1894 // we dllexport the vtable if it is defined in this TU and all the non-inline
1895 // virtual methods are marked dllexport.
1896 if (CGM.getTarget().hasPS4DLLImportExport()) {
1897 if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
1898 if (CGM.getVTables().isVTableExternal(RD)) {
1899 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
1900 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1901 } else {
1902 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
1903 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1904 }
1905 }
1906 }
1907 CGM.setGVProperties(VTable, RD);
1908
1909 return VTable;
1910}
1911
1912CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1913 GlobalDecl GD,
1914 Address This,
1915 llvm::Type *Ty,
1916 SourceLocation Loc) {
1917 llvm::Type *TyPtr = Ty->getPointerTo();
1918 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1919 llvm::Value *VTable = CGF.GetVTablePtr(
1920 This, TyPtr->getPointerTo(), MethodDecl->getParent());
1921
1922 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1923 llvm::Value *VFunc;
1924 if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1925 VFunc = CGF.EmitVTableTypeCheckedLoad(
1926 MethodDecl->getParent(), VTable, TyPtr,
1927 VTableIndex *
1928 CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
1929 8);
1930 } else {
1931 CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1932
1933 llvm::Value *VFuncLoad;
1934 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1935 VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
1936 llvm::Value *Load = CGF.Builder.CreateCall(
1937 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1938 {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
1939 VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
1940 } else {
1941 VTable =
1942 CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
1943 llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1944 TyPtr, VTable, VTableIndex, "vfn");
1945 VFuncLoad =
1946 CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
1947 CGF.getPointerAlign());
1948 }
1949
1950    // Add !invariant.load metadata to the virtual function load to indicate
1951    // that the function does not change inside the vtable.
1952    // It's safe to add it without -fstrict-vtable-pointers, but it would not
1953    // help devirtualization, because it only matters when there are two loads
1954    // of the same virtual function from the same vtable load, which won't
1955    // happen without devirtualization enabled by -fstrict-vtable-pointers.
1956 if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1957 CGM.getCodeGenOpts().StrictVTablePointers) {
1958 if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
1959 VFuncLoadInstr->setMetadata(
1960 llvm::LLVMContext::MD_invariant_load,
1961 llvm::MDNode::get(CGM.getLLVMContext(),
1962                              llvm::ArrayRef<llvm::Metadata *>()));
1963      }
1964 }
1965 VFunc = VFuncLoad;
1966 }
1967
1968 CGCallee Callee(GD, VFunc);
1969 return Callee;
1970}
1971
1972llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1973 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1974 Address This, DeleteOrMemberCallExpr E) {
1975 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
1976 auto *D = E.dyn_cast<const CXXDeleteExpr *>();
1977 assert((CE != nullptr) ^ (D != nullptr));
1978 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1979 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1980
1981 GlobalDecl GD(Dtor, DtorType);
1982 const CGFunctionInfo *FInfo =
1983 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
1984 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1985 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
1986
1987 QualType ThisTy;
1988 if (CE) {
1989 ThisTy = CE->getObjectType();
1990 } else {
1991 ThisTy = D->getDestroyedType();
1992 }
1993
1994 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
1995 QualType(), nullptr);
1996 return nullptr;
1997}
1998
1999void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2000 CodeGenVTables &VTables = CGM.getVTables();
2001 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2002 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2003}
2004
2005bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2006 const CXXRecordDecl *RD) const {
2007 // We don't emit available_externally vtables if we are in -fapple-kext mode
2008 // because kext mode does not permit devirtualization.
2009 if (CGM.getLangOpts().AppleKext)
2010 return false;
2011
2012 // If the vtable is hidden then it is not safe to emit an available_externally
2013 // copy of vtable.
2014 if (isVTableHidden(RD))
2015 return false;
2016
2017 if (CGM.getCodeGenOpts().ForceEmitVTables)
2018 return true;
2019
2020  // If there are no inline virtual functions that have yet to be emitted, we
2021  // are safe to emit an available_externally copy of the vtable.
2022  // FIXME: we can still emit a copy of the vtable if we
2023  // can emit definitions of the inline functions.
2024 if (hasAnyUnusedVirtualInlineFunction(RD))
2025 return false;
2026
2027 // For a class with virtual bases, we must also be able to speculatively
2028 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2029 // the vtable" and "can emit the VTT". For a base subobject, this means we
2030 // need to be able to emit non-virtual base vtables.
2031 if (RD->getNumVBases()) {
2032 for (const auto &B : RD->bases()) {
2033 auto *BRD = B.getType()->getAsCXXRecordDecl();
2034 assert(BRD && "no class for base specifier");
2035 if (B.isVirtual() || !BRD->isDynamicClass())
2036 continue;
2037 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2038 return false;
2039 }
2040 }
2041
2042 return true;
2043}
2044
2045bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2046 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2047 return false;
2048
2049 // For a complete-object vtable (or more specifically, for the VTT), we need
2050 // to be able to speculatively emit the vtables of all dynamic virtual bases.
2051 for (const auto &B : RD->vbases()) {
2052 auto *BRD = B.getType()->getAsCXXRecordDecl();
2053 assert(BRD && "no class for base specifier");
2054 if (!BRD->isDynamicClass())
2055 continue;
2056 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2057 return false;
2058 }
2059
2060 return true;
2061}
2062static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2063                                          Address InitialPtr,
2064 int64_t NonVirtualAdjustment,
2065 int64_t VirtualAdjustment,
2066 bool IsReturnAdjustment) {
2067 if (!NonVirtualAdjustment && !VirtualAdjustment)
2068 return InitialPtr.getPointer();
2069
2070 Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
2071
2072 // In a base-to-derived cast, the non-virtual adjustment is applied first.
2073 if (NonVirtualAdjustment && !IsReturnAdjustment) {
2074    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
2075                              CharUnits::fromQuantity(NonVirtualAdjustment));
2076 }
2077
2078 // Perform the virtual adjustment if we have one.
2079 llvm::Value *ResultPtr;
2080 if (VirtualAdjustment) {
2081 Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
2082 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
2083
2084 llvm::Value *Offset;
2085 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
2086 CGF.Int8Ty, VTablePtr, VirtualAdjustment);
2087    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
2088      // Load the adjustment offset from the vtable as a 32-bit int.
2089 OffsetPtr =
2090 CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
2091 Offset =
2092 CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
2093                                        CharUnits::fromQuantity(4));
2094    } else {
2095 llvm::Type *PtrDiffTy =
2096          CGF.ConvertType(CGF.getContext().getPointerDiffType());
2097
2098 OffsetPtr =
2099 CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
2100
2101 // Load the adjustment offset from the vtable.
2102 Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
2103 CGF.getPointerAlign());
2104 }
2105 // Adjust our pointer.
2106 ResultPtr = CGF.Builder.CreateInBoundsGEP(
2107 V.getElementType(), V.getPointer(), Offset);
2108 } else {
2109 ResultPtr = V.getPointer();
2110 }
2111
2112 // In a derived-to-base conversion, the non-virtual adjustment is
2113 // applied second.
2114 if (NonVirtualAdjustment && IsReturnAdjustment) {
2115 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
2116 NonVirtualAdjustment);
2117 }
2118
2119 // Cast back to the original type.
2120 return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
2121}
2122
2123llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2124 Address This,
2125 const ThisAdjustment &TA) {
2126 return performTypeAdjustment(CGF, This, TA.NonVirtual,
2127                               TA.Virtual.Itanium.VCallOffsetOffset,
2128                               /*IsReturnAdjustment=*/false);
2129}
2130
2131llvm::Value *
2132ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2133 const ReturnAdjustment &RA) {
2134 return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2135                               RA.Virtual.Itanium.VBaseOffsetOffset,
2136                               /*IsReturnAdjustment=*/true);
2137}
2138
2139void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2140 RValue RV, QualType ResultType) {
2141 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2142 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2143
2144 // Destructor thunks in the ARM ABI have indeterminate results.
2145 llvm::Type *T = CGF.ReturnValue.getElementType();
2146 RValue Undef = RValue::get(llvm::UndefValue::get(T));
2147 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2148}
2149
2150/************************** Array allocation cookies **************************/
2151
2152CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2153 // The array cookie is a size_t; pad that up to the element alignment.
2154 // The cookie is actually right-justified in that space.
2155 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2156 CGM.getContext().getPreferredTypeAlignInChars(elementType));
2157}
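// Illustrative sketch (assuming an LP64 target, so size_t is 8 bytes):
//   struct A { ~A(); };                 // cookie = max(8, alignof(A))  = 8
//   struct B { ~B(); long double x; };  // cookie = max(8, alignof(B)) = 16
// In the second case the element count occupies the last 8 bytes of the
// 16-byte cookie, i.e. it is right-justified in that space as noted above.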
2158
2159Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2160 Address NewPtr,
2161 llvm::Value *NumElements,
2162 const CXXNewExpr *expr,
2163 QualType ElementType) {
2164 assert(requiresArrayCookie(expr));
2165
2166 unsigned AS = NewPtr.getAddressSpace();
2167
2168 ASTContext &Ctx = getContext();
2169 CharUnits SizeSize = CGF.getSizeSize();
2170
2171 // The size of the cookie.
2172 CharUnits CookieSize =
2173 std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
2174 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2175
2176 // Compute an offset to the cookie.
2177 Address CookiePtr = NewPtr;
2178 CharUnits CookieOffset = CookieSize - SizeSize;
2179 if (!CookieOffset.isZero())
2180 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2181
2182 // Write the number of elements into the appropriate slot.
2183 Address NumElementsPtr =
2184 CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
2185 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2186
2187 // Handle the array cookie specially in ASan.
2188 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2189 (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2190 CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2191 // The store to the CookiePtr does not need to be instrumented.
2192 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
2193 llvm::FunctionType *FTy =
2194 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2195 llvm::FunctionCallee F =
2196 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2197 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
2198 }
2199
2200 // Finally, compute a pointer to the actual data buffer by skipping
2201 // over the cookie completely.
2202 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2203}
2204
2205llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2206 Address allocPtr,
2207 CharUnits cookieSize) {
2208 // The element size is right-justified in the cookie.
2209 Address numElementsPtr = allocPtr;
2210 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2211 if (!numElementsOffset.isZero())
2212 numElementsPtr =
2213 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2214
2215 unsigned AS = allocPtr.getAddressSpace();
2216 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2217 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2218 return CGF.Builder.CreateLoad(numElementsPtr);
2219 // In asan mode emit a function call instead of a regular load and let the
2220 // run-time deal with it: if the shadow is properly poisoned return the
2221 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2222 // We can't simply ignore this load using nosanitize metadata because
2223 // the metadata may be lost.
2224 llvm::FunctionType *FTy =
2225 llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2226 llvm::FunctionCallee F =
2227 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2228 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2229}
2230
2231CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2232 // ARM says that the cookie is always:
2233 // struct array_cookie {
2234 // std::size_t element_size; // element_size != 0
2235 // std::size_t element_count;
2236 // };
2237 // But the base ABI doesn't give anything an alignment greater than
2238 // 8, so we can dismiss this as typical ABI-author blindness to
2239 // actual language complexity and round up to the element alignment.
2240 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2241 CGM.getContext().getTypeAlignInChars(elementType));
2242}
2243
2244Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2245 Address newPtr,
2246 llvm::Value *numElements,
2247 const CXXNewExpr *expr,
2248 QualType elementType) {
2249 assert(requiresArrayCookie(expr));
2250
2251 // The cookie is always at the start of the buffer.
2252 Address cookie = newPtr;
2253
2254 // The first element is the element size.
2255 cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2256 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2257 getContext().getTypeSizeInChars(elementType).getQuantity());
2258 CGF.Builder.CreateStore(elementSize, cookie);
2259
2260 // The second element is the element count.
2261 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2262 CGF.Builder.CreateStore(numElements, cookie);
2263
2264 // Finally, compute a pointer to the actual data buffer by skipping
2265 // over the cookie completely.
2266 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2267 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2268}
2269
2270llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2271 Address allocPtr,
2272 CharUnits cookieSize) {
2273 // The number of elements is at offset sizeof(size_t) relative to
2274 // the allocated pointer.
2275 Address numElementsPtr
2276 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2277
2278 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2279 return CGF.Builder.CreateLoad(numElementsPtr);
2280}
2281
2282/*********************** Static local initialization **************************/
2283
2284static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2285 llvm::PointerType *GuardPtrTy) {
2286 // int __cxa_guard_acquire(__guard *guard_object);
2287 llvm::FunctionType *FTy =
2288 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2289 GuardPtrTy, /*isVarArg=*/false);
2290 return CGM.CreateRuntimeFunction(
2291 FTy, "__cxa_guard_acquire",
2292 llvm::AttributeList::get(CGM.getLLVMContext(),
2293 llvm::AttributeList::FunctionIndex,
2294 llvm::Attribute::NoUnwind));
2295}
2296
2297static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2298 llvm::PointerType *GuardPtrTy) {
2299 // void __cxa_guard_release(__guard *guard_object);
2300 llvm::FunctionType *FTy =
2301 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2302 return CGM.CreateRuntimeFunction(
2303 FTy, "__cxa_guard_release",
2304 llvm::AttributeList::get(CGM.getLLVMContext(),
2305 llvm::AttributeList::FunctionIndex,
2306 llvm::Attribute::NoUnwind));
2307}
2308
2309static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2310 llvm::PointerType *GuardPtrTy) {
2311 // void __cxa_guard_abort(__guard *guard_object);
2312 llvm::FunctionType *FTy =
2313 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2314 return CGM.CreateRuntimeFunction(
2315 FTy, "__cxa_guard_abort",
2316 llvm::AttributeList::get(CGM.getLLVMContext(),
2317 llvm::AttributeList::FunctionIndex,
2318 llvm::Attribute::NoUnwind));
2319}
2320
2321namespace {
2322 struct CallGuardAbort final : EHScopeStack::Cleanup {
2323 llvm::GlobalVariable *Guard;
2324 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2325
2326 void Emit(CodeGenFunction &CGF, Flags flags) override {
2327 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2328 Guard);
2329 }
2330 };
2331}
2332
2333/// The ARM code here follows the Itanium code closely enough that we
2334/// just special-case it at particular places.
2335void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2336 const VarDecl &D,
2337 llvm::GlobalVariable *var,
2338 bool shouldPerformInit) {
2339 CGBuilderTy &Builder = CGF.Builder;
2340
2341 // Inline variables that weren't instantiated from variable templates have
2342 // partially-ordered initialization within their translation unit.
2343 bool NonTemplateInline =
2344 D.isInline() &&
2345      !isTemplateInstantiation(D.getTemplateSpecializationKind());
2346
2347 // We only need to use thread-safe statics for local non-TLS variables and
2348 // inline variables; other global initialization is always single-threaded
2349 // or (through lazy dynamic loading in multiple threads) unsequenced.
2350 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2351 (D.isLocalVarDecl() || NonTemplateInline) &&
2352 !D.getTLSKind();
2353
2354 // If we have a global variable with internal linkage and thread-safe statics
2355 // are disabled, we can just let the guard variable be of type i8.
2356 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2357
2358 llvm::IntegerType *guardTy;
2359 CharUnits guardAlignment;
2360 if (useInt8GuardVariable) {
2361 guardTy = CGF.Int8Ty;
2362 guardAlignment = CharUnits::One();
2363 } else {
2364 // Guard variables are 64 bits in the generic ABI and size width on ARM
2365 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2366 if (UseARMGuardVarABI) {
2367 guardTy = CGF.SizeTy;
2368 guardAlignment = CGF.getSizeAlign();
2369 } else {
2370 guardTy = CGF.Int64Ty;
2371 guardAlignment =
2372 CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
2373 }
2374 }
2375 llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
2376 CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
2377
2378 // Create the guard variable if we don't already have it (as we
2379 // might if we're double-emitting this function body).
2380 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2381 if (!guard) {
2382 // Mangle the name for the guard.
2383 SmallString<256> guardName;
2384 {
2385 llvm::raw_svector_ostream out(guardName);
2386 getMangleContext().mangleStaticGuardVariable(&D, out);
2387 }
2388
2389 // Create the guard variable with a zero-initializer.
2390 // Just absorb linkage, visibility and dll storage class from the guarded
2391 // variable.
2392 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2393 false, var->getLinkage(),
2394 llvm::ConstantInt::get(guardTy, 0),
2395 guardName.str());
2396 guard->setDSOLocal(var->isDSOLocal());
2397 guard->setVisibility(var->getVisibility());
2398 guard->setDLLStorageClass(var->getDLLStorageClass());
2399 // If the variable is thread-local, so is its guard variable.
2400 guard->setThreadLocalMode(var->getThreadLocalMode());
2401 guard->setAlignment(guardAlignment.getAsAlign());
2402
2403 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2404 // group as the associated data object." In practice, this doesn't work for
2405 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2406 llvm::Comdat *C = var->getComdat();
2407 if (!D.isLocalVarDecl() && C &&
2408 (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2409 CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2410 guard->setComdat(C);
2411 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2412 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2413 }
2414
2415 CGM.setStaticLocalDeclGuardAddress(&D, guard);
2416 }
2417
2418 Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
2419
2420 // Test whether the variable has completed initialization.
2421 //
2422 // Itanium C++ ABI 3.3.2:
2423 // The following is pseudo-code showing how these functions can be used:
2424 // if (obj_guard.first_byte == 0) {
2425 // if ( __cxa_guard_acquire (&obj_guard) ) {
2426 // try {
2427 // ... initialize the object ...;
2428 // } catch (...) {
2429 // __cxa_guard_abort (&obj_guard);
2430 // throw;
2431 // }
2432 // ... queue object destructor with __cxa_atexit() ...;
2433 // __cxa_guard_release (&obj_guard);
2434 // }
2435 // }
2436 //
2437 // If threadsafe statics are enabled, but we don't have inline atomics, just
2438 // call __cxa_guard_acquire unconditionally. The "inline" check isn't
2439 // actually inline, and the user might not expect calls to __atomic libcalls.
2440
2441 unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
2442 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2443 if (!threadsafe || MaxInlineWidthInBits) {
2444 // Load the first byte of the guard variable.
2445 llvm::LoadInst *LI =
2446 Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2447
2448 // Itanium ABI:
2449 // An implementation supporting thread-safety on multiprocessor
2450 // systems must also guarantee that references to the initialized
2451 // object do not occur before the load of the initialization flag.
2452 //
2453 // In LLVM, we do this by marking the load Acquire.
2454 if (threadsafe)
2455 LI->setAtomic(llvm::AtomicOrdering::Acquire);
2456
2457 // For ARM, we should only check the first bit, rather than the entire byte:
2458 //
2459 // ARM C++ ABI 3.2.3.1:
2460 // To support the potential use of initialization guard variables
2461 // as semaphores that are the target of ARM SWP and LDREX/STREX
2462 // synchronizing instructions we define a static initialization
2463 // guard variable to be a 4-byte aligned, 4-byte word with the
2464 // following inline access protocol.
2465 // #define INITIALIZED 1
2466 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2467 // if (__cxa_guard_acquire(&obj_guard))
2468 // ...
2469 // }
2470 //
2471 // and similarly for ARM64:
2472 //
2473 // ARM64 C++ ABI 3.2.2:
2474 // This ABI instead only specifies the value bit 0 of the static guard
2475 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2476 // variable is not initialized and 1 when it is.
2477 llvm::Value *V =
2478 (UseARMGuardVarABI && !useInt8GuardVariable)
2479 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2480 : LI;
2481 llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2482
2483 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2484
2485 // Check if the first byte of the guard variable is zero.
2486 CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2487 CodeGenFunction::GuardKind::VariableGuard, &D);
2488
2489 CGF.EmitBlock(InitCheckBlock);
2490 }
2491
2492 // The semantics of dynamic initialization of variables with static or thread
2493 // storage duration depends on whether they are declared at block-scope. The
2494 // initialization of such variables at block-scope can be aborted with an
2495 // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
2496 // to their initialization has undefined behavior (also per C++20
2497 // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
2498 // lead to termination (per C++20 [except.terminate]p1), and recursive
2499 // references to the variables are governed only by the lifetime rules (per
2500 // C++20 [class.cdtor]p2), which means such references are perfectly fine as
2501 // long as they avoid touching memory. As a result, block-scope variables must
2502 // not be marked as initialized until after initialization completes (unless
2503 // the mark is reverted following an exception), but non-block-scope variables
2504 // must be marked prior to initialization so that recursive accesses during
2505 // initialization do not restart initialization.
2506
2507 // Variables used when coping with thread-safe statics and exceptions.
2508 if (threadsafe) {
2509 // Call __cxa_guard_acquire.
2510 llvm::Value *V
2511 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2512
2513 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2514
2515 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2516 InitBlock, EndBlock);
2517
2518 // Call __cxa_guard_abort along the exceptional edge.
2519 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2520
2521 CGF.EmitBlock(InitBlock);
2522 } else if (!D.isLocalVarDecl()) {
2523 // For non-local variables, store 1 into the first byte of the guard
2524 // variable before the object initialization begins so that references
2525 // to the variable during initialization don't restart initialization.
2526 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2527 Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2528 }
2529
2530 // Emit the initializer and add a global destructor if appropriate.
2531 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2532
2533 if (threadsafe) {
2534 // Pop the guard-abort cleanup if we pushed one.
2535 CGF.PopCleanupBlock();
2536
2537 // Call __cxa_guard_release. This cannot throw.
2538 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2539 guardAddr.getPointer());
2540 } else if (D.isLocalVarDecl()) {
2541 // For local variables, store 1 into the first byte of the guard variable
2542 // after the object initialization completes so that initialization is
2543 // retried if initialization is interrupted by an exception.
2544 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2545 Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2546 }
2547
2548 CGF.EmitBlock(EndBlock);
2549}
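// Illustrative sketch (approximate; 'Widget' is a hypothetical class):
//   void f() { static Widget w; }
// produces a guard variable mangled _ZGVZ1fE1w, and the emitted code behaves
// roughly like
//   if (*(char *)&guard == 0 && __cxa_guard_acquire(&guard)) {
//     // construct w; register its destructor with __cxa_atexit
//     __cxa_guard_release(&guard);   // __cxa_guard_abort on exception
//   }
// with the first-byte load performed as an acquire load when thread-safe
// statics are enabled, and only bit 0 tested under the ARM guard ABI.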
2550
2551/// Register a global destructor using __cxa_atexit.
2552static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2553                                        llvm::FunctionCallee dtor,
2554 llvm::Constant *addr, bool TLS) {
2555 assert(!CGF.getTarget().getTriple().isOSAIX() &&
2556 "unexpected call to emitGlobalDtorWithCXAAtExit");
2557 assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2558 "__cxa_atexit is disabled");
2559 const char *Name = "__cxa_atexit";
2560 if (TLS) {
2561 const llvm::Triple &T = CGF.getTarget().getTriple();
2562 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2563 }
2564
2565 // We're assuming that the destructor function is something we can
2566 // reasonably call with the default CC. Go ahead and cast it to the
2567 // right prototype.
2568 llvm::Type *dtorTy =
2569 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2570
2571 // Preserve address space of addr.
2572 auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2573 auto AddrInt8PtrTy =
2574 AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
2575
2576 // Create a variable that binds the atexit to this shared object.
2577 llvm::Constant *handle =
2578 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2579 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2580 GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2581
2582 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2583 llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
2584 llvm::FunctionType *atexitTy =
2585 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2586
2587 // Fetch the actual function.
2588 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2589 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2590 fn->setDoesNotThrow();
2591
2592 if (!addr)
2593 // addr is null when we are trying to register a dtor annotated with
2594 // __attribute__((destructor)) in a constructor function. Using null here is
2595 // okay because this argument is just passed back to the destructor
2596 // function.
2597 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2598
2599 llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
2600 cast<llvm::Constant>(dtor.getCallee()), dtorTy),
2601 llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
2602 handle};
2603 CGF.EmitNounwindRuntimeCall(atexit, args);
2604}
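// Illustrative sketch ('Widget w;' at namespace scope is a hypothetical
// example): the registration emitted above amounts to
//   __cxa_atexit((void (*)(void *))dtor, (void *)&w, &__dso_handle);
// and for thread_local variables the same call is made through
// __cxa_thread_atexit (or _tlv_atexit on Darwin).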
2605
2606static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2607                                                   StringRef FnName) {
2608 // Create a function that registers/unregisters destructors that have the same
2609 // priority.
2610 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2611 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2612 FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2613
2614 return GlobalInitOrCleanupFn;
2615}
2616
2617void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
2618 for (const auto &I : DtorsUsingAtExit) {
2619 int Priority = I.first;
2620 std::string GlobalCleanupFnName =
2621 std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);
2622
2623 llvm::Function *GlobalCleanupFn =
2624 createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);
2625
2626 CodeGenFunction CGF(*this);
2627 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
2628 getTypes().arrangeNullaryFunction(), FunctionArgList(),
2629                      SourceLocation(), SourceLocation());
2630    auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2631
2632 // Get the destructor function type, void(*)(void).
2633 llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
2634 llvm::Type *dtorTy = dtorFuncTy->getPointerTo();
2635
2636 // Destructor functions are run/unregistered in non-ascending
2637 // order of their priorities.
2638 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2639 auto itv = Dtors.rbegin();
2640 while (itv != Dtors.rend()) {
2641 llvm::Function *Dtor = *itv;
2642
2643 // We're assuming that the destructor function is something we can
2644 // reasonably call with the correct CC. Go ahead and cast it to the
2645 // right prototype.
2646 llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
2647 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
2648 llvm::Value *NeedsDestruct =
2649 CGF.Builder.CreateIsNull(V, "needs_destruct");
2650
2651 llvm::BasicBlock *DestructCallBlock =
2652 CGF.createBasicBlock("destruct.call");
2653 llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
2654 (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
2655 // Check if unatexit returns a value of 0. If it does, jump to
2656 // DestructCallBlock, otherwise jump to EndBlock directly.
2657 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
2658
2659 CGF.EmitBlock(DestructCallBlock);
2660
2661 // Emit the call to casted Dtor.
2662 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
2663 // Make sure the call and the callee agree on calling convention.
2664 CI->setCallingConv(Dtor->getCallingConv());
2665
2666 CGF.EmitBlock(EndBlock);
2667
2668 itv++;
2669 }
2670
2671 CGF.FinishFunction();
2672 AddGlobalDtor(GlobalCleanupFn, Priority);
2673 }
2674}
2675
2676void CodeGenModule::registerGlobalDtorsWithAtExit() {
2677 for (const auto &I : DtorsUsingAtExit) {
2678 int Priority = I.first;
2679 std::string GlobalInitFnName =
2680 std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2681 llvm::Function *GlobalInitFn =
2682 createGlobalInitOrCleanupFn(*this, GlobalInitFnName);
2683
2684 CodeGenFunction CGF(*this);
2685 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
2686 getTypes().arrangeNullaryFunction(), FunctionArgList(),
2687                      SourceLocation(), SourceLocation());
2688    auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2689
2690 // Since constructor functions are run in non-descending order of their
2691 // priorities, destructors are registered in non-descending order of their
2692 // priorities, and since destructor functions are run in the reverse order
2693 // of their registration, destructor functions are run in non-ascending
2694 // order of their priorities.
2695 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2696 for (auto *Dtor : Dtors) {
2697 // Register the destructor function calling __cxa_atexit if it is
2698 // available. Otherwise fall back on calling atexit.
2699 if (getCodeGenOpts().CXAAtExit) {
2700 emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2701 } else {
2702 // Get the destructor function type, void(*)(void).
2703 llvm::Type *dtorTy =
2704 llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();
2705
2706 // We're assuming that the destructor function is something we can
2707 // reasonably call with the correct CC. Go ahead and cast it to the
2708 // right prototype.
2709        CGF.registerGlobalDtorWithAtExit(
2710            llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
2711 }
2712 }
2713
2714 CGF.FinishFunction();
2715 AddGlobalCtor(GlobalInitFn, Priority);
2716 }
2717
2718 if (getCXXABI().useSinitAndSterm())
2719 unregisterGlobalDtorsWithUnAtExit();
2720}
2721
2722/// Register a global destructor as best as we know how.
2723void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2724 llvm::FunctionCallee dtor,
2725 llvm::Constant *addr) {
2726 if (D.isNoDestroy(CGM.getContext()))
2727 return;
2728
2729 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2730 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2731 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2732 // We can always use __cxa_thread_atexit.
2733 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2734 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2735
2736 // In Apple kexts, we want to add a global destructor entry.
2737 // FIXME: shouldn't this be guarded by some variable?
2738 if (CGM.getLangOpts().AppleKext) {
2739 // Generate a global destructor entry.
2740 return CGM.AddCXXDtorEntry(dtor, addr);
2741 }
2742
2743 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2744}
2745
2746static bool isThreadWrapperReplaceable(const VarDecl *VD,
2747                                       CodeGen::CodeGenModule &CGM) {
2748  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2749 // Darwin prefers to have references to thread local variables to go through
2750 // the thread wrapper instead of directly referencing the backing variable.
2751 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2752 CGM.getTarget().getTriple().isOSDarwin();
2753}
2754
2755/// Get the appropriate linkage for the wrapper function. This is essentially
2756/// the weak form of the variable's linkage; every translation unit which needs
2757/// the wrapper emits a copy, and we want the linker to merge them.
2758static llvm::GlobalValue::LinkageTypes
2759getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2760  llvm::GlobalValue::LinkageTypes VarLinkage =
2761 CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2762
2763 // For internal linkage variables, we don't need an external or weak wrapper.
2764 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2765 return VarLinkage;
2766
2767 // If the thread wrapper is replaceable, give it appropriate linkage.
2768 if (isThreadWrapperReplaceable(VD, CGM))
2769 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2770 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2771 return VarLinkage;
2772 return llvm::GlobalValue::WeakODRLinkage;
2773}
2774
2775llvm::Function *
2776ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2777 llvm::Value *Val) {
2778 // Mangle the name for the thread_local wrapper function.
2779 SmallString<256> WrapperName;
2780 {
2781 llvm::raw_svector_ostream Out(WrapperName);
2782 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2783 }
2784
2785 // FIXME: If VD is a definition, we should regenerate the function attributes
2786 // before returning.
2787 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2788 return cast<llvm::Function>(V);
2789
2790 QualType RetQT = VD->getType();
2791 if (RetQT->isReferenceType())
2792 RetQT = RetQT.getNonReferenceType();
2793
2794 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2795 getContext().getPointerType(RetQT), FunctionArgList());
2796
2797 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2798 llvm::Function *Wrapper =
2799 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2800 WrapperName.str(), &CGM.getModule());
2801
2802 if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
2803 Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
2804
2805 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);
2806
2807 // Always resolve references to the wrapper at link time.
2808 if (!Wrapper->hasLocalLinkage())
2809 if (!isThreadWrapperReplaceable(VD, CGM) ||
2810 llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2811 llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2812        VD->getVisibility() == HiddenVisibility)
2813      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2814
2815 if (isThreadWrapperReplaceable(VD, CGM)) {
2816 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2817 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2818 }
2819
2820 ThreadWrappers.push_back({VD, Wrapper});
2821 return Wrapper;
2822}
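// Illustrative sketch (hypothetical variable): for
//   extern thread_local int tls_x;
// uses of tls_x that need the wrapper are routed through _ZTW5tls_x(), which
// returns int* and, when dynamic initialization may be required, first calls
// the per-variable init function _ZTH5tls_x if it is present.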
2823
2824void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2825 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2826 ArrayRef<llvm::Function *> CXXThreadLocalInits,
2827 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2828 llvm::Function *InitFunc = nullptr;
2829
2830 // Separate initializers into those with ordered (or partially-ordered)
2831 // initialization and those with unordered initialization.
2832  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2833  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2834  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2835    if (isTemplateInstantiation(
2836            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2837 UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2838 CXXThreadLocalInits[I];
2839 else
2840 OrderedInits.push_back(CXXThreadLocalInits[I]);
2841 }
2842
2843 if (!OrderedInits.empty()) {
2844 // Generate a guarded initialization function.
2845 llvm::FunctionType *FTy =
2846 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2847    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2848    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
2849                                                     SourceLocation(),
2850                                                     /*TLS=*/true);
2851 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2852 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2853 llvm::GlobalVariable::InternalLinkage,
2854 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2855 Guard->setThreadLocal(true);
2856 Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
2857
2858 CharUnits GuardAlign = CharUnits::One();
2859 Guard->setAlignment(GuardAlign.getAsAlign());
2860
2861    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
2862        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
2863 // On Darwin platforms, use CXX_FAST_TLS calling convention.
2864 if (CGM.getTarget().getTriple().isOSDarwin()) {
2865 InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2866 InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2867 }
2868 }
2869
2870 // Create declarations for thread wrappers for all thread-local variables
2871 // with non-discardable definitions in this translation unit.
2872 for (const VarDecl *VD : CXXThreadLocals) {
2873 if (VD->hasDefinition() &&
2874 !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
2875 llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
2876 getOrCreateThreadLocalWrapper(VD, GV);
2877 }
2878 }
2879
2880 // Emit all referenced thread wrappers.
2881 for (auto VDAndWrapper : ThreadWrappers) {
2882 const VarDecl *VD = VDAndWrapper.first;
2883 llvm::GlobalVariable *Var =
2884 cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2885 llvm::Function *Wrapper = VDAndWrapper.second;
2886
2887 // Some targets require that all access to thread local variables go through
2888 // the thread wrapper. This means that we cannot attempt to create a thread
2889 // wrapper or a thread helper.
2890 if (!VD->hasDefinition()) {
2891 if (isThreadWrapperReplaceable(VD, CGM)) {
2892 Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2893 continue;
2894 }
2895
2896 // If this isn't a TU in which this variable is defined, the thread
2897 // wrapper is discardable.
2898 if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
2899 Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
2900 }
2901
2902 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2903
2904 // Mangle the name for the thread_local initialization function.
2905 SmallString<256> InitFnName;
2906 {
2907 llvm::raw_svector_ostream Out(InitFnName);
2908 getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2909 }
2910
2911 llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2912
2913 // If we have a definition for the variable, emit the initialization
2914 // function as an alias to the global Init function (if any). Otherwise,
2915 // produce a declaration of the initialization function.
2916 llvm::GlobalValue *Init = nullptr;
2917 bool InitIsInitFunc = false;
2918 bool HasConstantInitialization = false;
2919 if (!usesThreadWrapperFunction(VD)) {
2920 HasConstantInitialization = true;
2921 } else if (VD->hasDefinition()) {
2922 InitIsInitFunc = true;
2923 llvm::Function *InitFuncToUse = InitFunc;
2924      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2925        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2926 if (InitFuncToUse)
2927 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2928 InitFuncToUse);
2929 } else {
2930 // Emit a weak global function referring to the initialization function.
2931 // This function will not exist if the TU defining the thread_local
2932 // variable in question does not need any dynamic initialization for
2933 // its thread_local variables.
2934 Init = llvm::Function::Create(InitFnTy,
2935 llvm::GlobalVariable::ExternalWeakLinkage,
2936 InitFnName.str(), &CGM.getModule());
2937      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2938      CGM.SetLLVMFunctionAttributes(
2939          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
2940 }
2941
2942 if (Init) {
2943 Init->setVisibility(Var->getVisibility());
2944 // Don't mark an extern_weak function DSO local on windows.
2945 if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
2946 Init->setDSOLocal(Var->isDSOLocal());
2947 }
2948
2949 llvm::LLVMContext &Context = CGM.getModule().getContext();
2950
2951 // The linker on AIX is not happy with missing weak symbols. However,
2952 // other TUs will not know whether the initialization routine exists
2953    // so create an empty init function to satisfy the linker.
2954 // This is needed whenever a thread wrapper function is not used, and
2955 // also when the symbol is weak.
2956 if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
2957 isEmittedWithConstantInitializer(VD, true) &&
2958 !mayNeedDestruction(VD)) {
2959 // Init should be null. If it were non-null, then the logic above would
2960 // either be defining the function to be an alias or declaring the
2961 // function with the expectation that the definition of the variable
2962 // is elsewhere.
2963 assert(Init == nullptr && "Expected Init to be null.");
2964
2965 llvm::Function *Func = llvm::Function::Create(
2966 InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
2967      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2968      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
2969                                    cast<llvm::Function>(Func),
2970 /*IsThunk=*/false);
2971 // Create a function body that just returns
2972 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
2973 CGBuilderTy Builder(CGM, Entry);
2974 Builder.CreateRetVoid();
2975 }
2976
2977 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2978 CGBuilderTy Builder(CGM, Entry);
2979 if (HasConstantInitialization) {
2980 // No dynamic initialization to invoke.
2981 } else if (InitIsInitFunc) {
2982 if (Init) {
2983 llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
2984 if (isThreadWrapperReplaceable(VD, CGM)) {
2985 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2986 llvm::Function *Fn =
2987 cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
2988 Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2989 }
2990 }
2991 } else if (CGM.getTriple().isOSAIX()) {
2992 // On AIX, unless the variable is constinit and is neither of class type
2993 // nor a (possibly multi-dimensional) array of class type, thread_local
2994 // vars will have init routines regardless of whether they are
2995 // const-initialized. Since the routine is guaranteed to exist, we can
2996 // unconditionally call it without testing for its existence. This
2997 // avoids potentially unresolved weak symbols which the AIX linker
2998 // isn't happy with.
2999 Builder.CreateCall(InitFnTy, Init);
3000 } else {
3001 // Don't know whether we have an init function. Call it if it exists.
3002 llvm::Value *Have = Builder.CreateIsNotNull(Init);
3003 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3004 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3005 Builder.CreateCondBr(Have, InitBB, ExitBB);
3006
3007 Builder.SetInsertPoint(InitBB);
3008 Builder.CreateCall(InitFnTy, Init);
3009 Builder.CreateBr(ExitBB);
3010
3011 Builder.SetInsertPoint(ExitBB);
3012 }
3013
3014 // For a reference, the result of the wrapper function is a pointer to
3015 // the referenced object.
3016 llvm::Value *Val = Builder.CreateThreadLocalAddress(Var);
3017
3018 if (VD->getType()->isReferenceType()) {
3019 CharUnits Align = CGM.getContext().getDeclAlign(VD);
3020 Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align);
3021 }
3022 if (Val->getType() != Wrapper->getReturnType())
3023 Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
3024 Val, Wrapper->getReturnType(), "");
3025
3026 Builder.CreateRet(Val);
3027 }
3028}
3029
3030LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3031 const VarDecl *VD,
3032 QualType LValType) {
3033 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3034 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3035
3036 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3037 CallVal->setCallingConv(Wrapper->getCallingConv());
3038
3039 LValue LV;
3040 if (VD->getType()->isReferenceType())
3041 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3042 else
3043 LV = CGF.MakeAddrLValue(CallVal, LValType,
3044 CGF.getContext().getDeclAlign(VD));
3045 // FIXME: need setObjCGCLValueClass?
3046 return LV;
3047}
3048
3049/// Return whether the given global decl needs a VTT parameter, which it does
3050/// if it's a base constructor or destructor with virtual bases.
3051bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3052 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3053
3054 // If we don't have any virtual bases, just return early.
3055 if (!MD->getParent()->getNumVBases())
3056 return false;
3057
3058 // Check if we have a base constructor.
3059 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3060 return true;
3061
3062 // Check if we have a base destructor.
3063 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3064 return true;
3065
3066 return false;
3067}
3068
3069namespace {
3070class ItaniumRTTIBuilder {
3071 CodeGenModule &CGM; // Per-module state.
3072 llvm::LLVMContext &VMContext;
3073 const ItaniumCXXABI &CXXABI; // Per-module state.
3074
3075 /// Fields - The fields of the RTTI descriptor currently being built.
3077 SmallVector<llvm::Constant *, 16> Fields;
3078 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3079 llvm::GlobalVariable *
3080 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
3081
3082 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3083 /// descriptor of the given type.
3084 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
3085
3086 /// BuildVTablePointer - Build the vtable pointer for the given type.
3087 void BuildVTablePointer(const Type *Ty);
3088
3089 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3090 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3091 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
3092
3093 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3094 /// classes with bases that do not satisfy the abi::__si_class_type_info
3095 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3096 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3097
3098 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3099 /// for pointer types.
3100 void BuildPointerTypeInfo(QualType PointeeTy);
3101
3102 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3103 /// type_info for an object type.
3104 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
3105
3106 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3107 /// struct, used for member pointer types.
3108 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
3109
3110public:
3111 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
3112 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
3113
3114 // Pointer type info flags.
3115 enum {
3116 /// PTI_Const - Type has const qualifier.
3117 PTI_Const = 0x1,
3118
3119 /// PTI_Volatile - Type has volatile qualifier.
3120 PTI_Volatile = 0x2,
3121
3122 /// PTI_Restrict - Type has restrict qualifier.
3123 PTI_Restrict = 0x4,
3124
3125 /// PTI_Incomplete - Type is incomplete.
3126 PTI_Incomplete = 0x8,
3127
3128 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
3129 /// (in pointer to member).
3130 PTI_ContainingClassIncomplete = 0x10,
3131
3132 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
3133 //PTI_TransactionSafe = 0x20,
3134
3135 /// PTI_Noexcept - Pointee is noexcept function (C++1z).
3136 PTI_Noexcept = 0x40,
3137 };
3138
3139 // VMI type info flags.
3140 enum {
3141 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
3142 VMI_NonDiamondRepeat = 0x1,
3143
3144 /// VMI_DiamondShaped - Class is diamond shaped.
3145 VMI_DiamondShaped = 0x2
3146 };
3147
3148 // Base class type info flags.
3149 enum {
3150 /// BCTI_Virtual - Base class is virtual.
3151 BCTI_Virtual = 0x1,
3152
3153 /// BCTI_Public - Base class is public.
3154 BCTI_Public = 0x2
3155 };
3156
3157 /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
3158 /// link to an existing RTTI descriptor if one already exists.
3159 llvm::Constant *BuildTypeInfo(QualType Ty);
3160
3161 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
3162 llvm::Constant *BuildTypeInfo(
3163 QualType Ty,
3164 llvm::GlobalVariable::LinkageTypes Linkage,
3165 llvm::GlobalValue::VisibilityTypes Visibility,
3166 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
3167};
3168}
3169
3170llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3171 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3172 SmallString<256> Name;
3173 llvm::raw_svector_ostream Out(Name);
3174 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3175
3176 // We know that the mangled name of the type starts at index 4 of the
3177 // mangled name of the typename, so we can just index into it in order to
3178 // get the mangled name of the type.
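 // Illustrative example (added annotation, not in the original source): for
 // "int" the type name symbol is "_ZTSi"; skipping the 4-character "_ZTS"
 // prefix leaves the mangled type "i".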
3179 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3180 Name.substr(4));
3181 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3182
3183 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3184 Name, Init->getType(), Linkage, Align.getAsAlign());
3185
3186 GV->setInitializer(Init);
3187
3188 return GV;
3189}
3190
3191llvm::Constant *
3192ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3193 // Mangle the RTTI name.
3194 SmallString<256> Name;
3195 llvm::raw_svector_ostream Out(Name);
3196 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3197
3198 // Look for an existing global.
3199 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3200
3201 if (!GV) {
3202 // Create a new global variable.
3203 // Note for the future: If we would ever like to do deferred emission of
3204 // RTTI, check if emitting vtables opportunistically needs any adjustment.
3205
3206 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3207 /*isConstant=*/true,
3208 llvm::GlobalValue::ExternalLinkage, nullptr,
3209 Name);
3210 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3211 CGM.setGVProperties(GV, RD);
3212 // Import the typeinfo symbol when all non-inline virtual methods are
3213 // imported.
3214 if (CGM.getTarget().hasPS4DLLImportExport()) {
3215 if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3216 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3217 CGM.setDSOLocal(GV);
3218 }
3219 }
3220 }
3221
3222 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3223}
3224
3225/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3226/// info for that type is defined in the standard library.
3227 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3228 // Itanium C++ ABI 2.9.2:
3229 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
3230 // the run-time support library. Specifically, the run-time support
3231 // library should contain type_info objects for the types X, X* and
3232 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3233 // unsigned char, signed char, short, unsigned short, int, unsigned int,
3234 // long, unsigned long, long long, unsigned long long, float, double,
3235 // long double, char16_t, char32_t, and the IEEE 754r decimal and
3236 // half-precision floating point types.
3237 //
3238 // GCC also emits RTTI for __int128.
3239 // FIXME: We do not emit RTTI information for decimal types here.
3240
3241 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3242 switch (Ty->getKind()) {
3243 case BuiltinType::Void:
3244 case BuiltinType::NullPtr:
3245 case BuiltinType::Bool:
3246 case BuiltinType::WChar_S:
3247 case BuiltinType::WChar_U:
3248 case BuiltinType::Char_U:
3249 case BuiltinType::Char_S:
3250 case BuiltinType::UChar:
3251 case BuiltinType::SChar:
3252 case BuiltinType::Short:
3253 case BuiltinType::UShort:
3254 case BuiltinType::Int:
3255 case BuiltinType::UInt:
3256 case BuiltinType::Long:
3257 case BuiltinType::ULong:
3258 case BuiltinType::LongLong:
3259 case BuiltinType::ULongLong:
3260 case BuiltinType::Half:
3261 case BuiltinType::Float:
3262 case BuiltinType::Double:
3263 case BuiltinType::LongDouble:
3264 case BuiltinType::Float16:
3265 case BuiltinType::Float128:
3266 case BuiltinType::Ibm128:
3267 case BuiltinType::Char8:
3268 case BuiltinType::Char16:
3269 case BuiltinType::Char32:
3270 case BuiltinType::Int128:
3271 case BuiltinType::UInt128:
3272 return true;
3273
3274#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3275 case BuiltinType::Id:
3276#include "clang/Basic/OpenCLImageTypes.def"
3277#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3278 case BuiltinType::Id:
3279#include "clang/Basic/OpenCLExtensionTypes.def"
3280 case BuiltinType::OCLSampler:
3281 case BuiltinType::OCLEvent:
3282 case BuiltinType::OCLClkEvent:
3283 case BuiltinType::OCLQueue:
3284 case BuiltinType::OCLReserveID:
3285#define SVE_TYPE(Name, Id, SingletonId) \
3286 case BuiltinType::Id:
3287#include "clang/Basic/AArch64SVEACLETypes.def"
3288#define PPC_VECTOR_TYPE(Name, Id, Size) \
3289 case BuiltinType::Id:
3290#include "clang/Basic/PPCTypes.def"
3291#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3292#include "clang/Basic/RISCVVTypes.def"
3293#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3294#include "clang/Basic/WebAssemblyReferenceTypes.def"
3295 case BuiltinType::ShortAccum:
3296 case BuiltinType::Accum:
3297 case BuiltinType::LongAccum:
3298 case BuiltinType::UShortAccum:
3299 case BuiltinType::UAccum:
3300 case BuiltinType::ULongAccum:
3301 case BuiltinType::ShortFract:
3302 case BuiltinType::Fract:
3303 case BuiltinType::LongFract:
3304 case BuiltinType::UShortFract:
3305 case BuiltinType::UFract:
3306 case BuiltinType::ULongFract:
3307 case BuiltinType::SatShortAccum:
3308 case BuiltinType::SatAccum:
3309 case BuiltinType::SatLongAccum:
3310 case BuiltinType::SatUShortAccum:
3311 case BuiltinType::SatUAccum:
3312 case BuiltinType::SatULongAccum:
3313 case BuiltinType::SatShortFract:
3314 case BuiltinType::SatFract:
3315 case BuiltinType::SatLongFract:
3316 case BuiltinType::SatUShortFract:
3317 case BuiltinType::SatUFract:
3318 case BuiltinType::SatULongFract:
3319 case BuiltinType::BFloat16:
3320 return false;
3321
3322 case BuiltinType::Dependent:
3323#define BUILTIN_TYPE(Id, SingletonId)
3324#define PLACEHOLDER_TYPE(Id, SingletonId) \
3325 case BuiltinType::Id:
3326#include "clang/AST/BuiltinTypes.def"
3327 llvm_unreachable("asking for RRTI for a placeholder type!");
3328
3329 case BuiltinType::ObjCId:
3330 case BuiltinType::ObjCClass:
3331 case BuiltinType::ObjCSel:
3332 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3333 }
3334
3335 llvm_unreachable("Invalid BuiltinType Kind!");
3336}
3337
3338static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3339 QualType PointeeTy = PointerTy->getPointeeType();
3340 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3341 if (!BuiltinTy)
3342 return false;
3343
3344 // Check the qualifiers.
3345 Qualifiers Quals = PointeeTy.getQualifiers();
3346 Quals.removeConst();
3347
3348 if (!Quals.empty())
3349 return false;
3350
3351 return TypeInfoIsInStandardLibrary(BuiltinTy);
3352}
3353
3354/// IsStandardLibraryRTTIDescriptor - Returns whether the type
3355/// information for the given type exists in the standard library.
3356 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3357 // Type info for builtin types is defined in the standard library.
3358 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3359 return TypeInfoIsInStandardLibrary(BuiltinTy);
3360
3361 // Type info for some pointer types to builtin types is defined in the
3362 // standard library.
3363 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3364 return TypeInfoIsInStandardLibrary(PointerTy);
3365
3366 return false;
3367}
3368
3369/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3370/// the given type exists somewhere else, and that we should not emit the type
3371/// information in this translation unit. Assumes that it is not a
3372/// standard-library type.
3373 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3374 QualType Ty) {
3375 ASTContext &Context = CGM.getContext();
3376
3377 // If RTTI is disabled, assume it might be disabled in the
3378 // translation unit that defines any potential key function, too.
3379 if (!Context.getLangOpts().RTTI) return false;
3380
3381 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3382 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3383 if (!RD->hasDefinition())
3384 return false;
3385
3386 if (!RD->isDynamicClass())
3387 return false;
3388
3389 // FIXME: this may need to be reconsidered if the key function
3390 // changes.
3391 // N.B. We must always emit the RTTI data ourselves if there exists a key
3392 // function.
3393 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3394
3395 // Don't import the RTTI but emit it locally.
3396 if (CGM.getTriple().isWindowsGNUEnvironment())
3397 return false;
3398
3399 if (CGM.getVTables().isVTableExternal(RD)) {
3400 if (CGM.getTarget().hasPS4DLLImportExport())
3401 return true;
3402
3403 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3404 ? false
3405 : true;
3406 }
3407 if (IsDLLImport)
3408 return true;
3409 }
3410
3411 return false;
3412}
3413
3414/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3415static bool IsIncompleteClassType(const RecordType *RecordTy) {
3416 return !RecordTy->getDecl()->isCompleteDefinition();
3417}
3418
3419/// ContainsIncompleteClassType - Returns whether the given type contains an
3420/// incomplete class type. This is true if
3421///
3422/// * The given type is an incomplete class type.
3423/// * The given type is a pointer type whose pointee type contains an
3424/// incomplete class type.
3425/// * The given type is a member pointer type whose class is an incomplete
3426/// class type.
3427 /// * The given type is a member pointer type whose pointee type contains an
3428 /// incomplete class type.
3429 /// In short, it is a direct or indirect pointer to an incomplete class type.
3430 static bool ContainsIncompleteClassType(QualType Ty) {
3431 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3432 if (IsIncompleteClassType(RecordTy))
3433 return true;
3434 }
3435
3436 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3437 return ContainsIncompleteClassType(PointerTy->getPointeeType());
3438
3439 if (const MemberPointerType *MemberPointerTy =
3440 dyn_cast<MemberPointerType>(Ty)) {
3441 // Check if the class type is incomplete.
3442 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3443 if (IsIncompleteClassType(ClassType))
3444 return true;
3445
3446 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3447 }
3448
3449 return false;
3450}
3451
3452// CanUseSingleInheritance - Return whether the given record decl has a "single,
3453// public, non-virtual base at offset zero (i.e. the derived class is dynamic
3454 // iff the base is)", according to the Itanium C++ ABI, 2.9.5p6b.
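// Illustrative example (added annotation, not in the original source):
// "struct B { virtual ~B(); }; struct D : B {};" satisfies these constraints,
// whereas a class with multiple bases, a virtual base, or a non-public base
// does not.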
3455 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3456 // Check the number of bases.
3457 if (RD->getNumBases() != 1)
3458 return false;
3459
3460 // Get the base.
3461 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3462
3463 // Check that the base is not virtual.
3464 if (Base->isVirtual())
3465 return false;
3466
3467 // Check that the base is public.
3468 if (Base->getAccessSpecifier() != AS_public)
3469 return false;
3470
3471 // Check that the class is dynamic iff the base is.
3472 auto *BaseDecl =
3473 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3474 if (!BaseDecl->isEmpty() &&
3475 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3476 return false;
3477
3478 return true;
3479}
3480
3481void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3482 // abi::__class_type_info.
3483 static const char * const ClassTypeInfo =
3484 "_ZTVN10__cxxabiv117__class_type_infoE";
3485 // abi::__si_class_type_info.
3486 static const char * const SIClassTypeInfo =
3487 "_ZTVN10__cxxabiv120__si_class_type_infoE";
3488 // abi::__vmi_class_type_info.
3489 static const char * const VMIClassTypeInfo =
3490 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3491
3492 const char *VTableName = nullptr;
3493
3494 switch (Ty->getTypeClass()) {
3495#define TYPE(Class, Base)
3496#define ABSTRACT_TYPE(Class, Base)
3497#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3498#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3499#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3500#include "clang/AST/TypeNodes.inc"
3501 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3502
3503 case Type::LValueReference:
3504 case Type::RValueReference:
3505 llvm_unreachable("References shouldn't get here");
3506
3507 case Type::Auto:
3508 case Type::DeducedTemplateSpecialization:
3509 llvm_unreachable("Undeduced type shouldn't get here");
3510
3511 case Type::Pipe:
3512 llvm_unreachable("Pipe types shouldn't get here");
3513
3514 case Type::Builtin:
3515 case Type::BitInt:
3516 // GCC treats vector and complex types as fundamental types.
3517 case Type::Vector:
3518 case Type::ExtVector:
3519 case Type::ConstantMatrix:
3520 case Type::Complex:
3521 case Type::Atomic:
3522 // FIXME: GCC treats block pointers as fundamental types?!
3523 case Type::BlockPointer:
3524 // abi::__fundamental_type_info.
3525 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3526 break;
3527
3528 case Type::ConstantArray:
3529 case Type::IncompleteArray:
3530 case Type::VariableArray:
3531 // abi::__array_type_info.
3532 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3533 break;
3534
3535 case Type::FunctionNoProto:
3536 case Type::FunctionProto:
3537 // abi::__function_type_info.
3538 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3539 break;
3540
3541 case Type::Enum:
3542 // abi::__enum_type_info.
3543 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3544 break;
3545
3546 case Type::Record: {
3547 const CXXRecordDecl *RD =
3548 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3549
3550 if (!RD->hasDefinition() || !RD->getNumBases()) {
3551 VTableName = ClassTypeInfo;
3552 } else if (CanUseSingleInheritance(RD)) {
3553 VTableName = SIClassTypeInfo;
3554 } else {
3555 VTableName = VMIClassTypeInfo;
3556 }
3557
3558 break;
3559 }
3560
3561 case Type::ObjCObject:
3562 // Ignore protocol qualifiers.
3563 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3564
3565 // Handle id and Class.
3566 if (isa<BuiltinType>(Ty)) {
3567 VTableName = ClassTypeInfo;
3568 break;
3569 }
3570
3571 assert(isa<ObjCInterfaceType>(Ty));
3572 [[fallthrough]];
3573
3574 case Type::ObjCInterface:
3575 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3576 VTableName = SIClassTypeInfo;
3577 } else {
3578 VTableName = ClassTypeInfo;
3579 }
3580 break;
3581
3582 case Type::ObjCObjectPointer:
3583 case Type::Pointer:
3584 // abi::__pointer_type_info.
3585 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3586 break;
3587
3588 case Type::MemberPointer:
3589 // abi::__pointer_to_member_type_info.
3590 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3591 break;
3592 }
3593
3594 llvm::Constant *VTable = nullptr;
3595
3596 // Check if the alias exists. If it doesn't, then get or create the global.
3597 if (CGM.getItaniumVTableContext().isRelativeLayout())
3598 VTable = CGM.getModule().getNamedAlias(VTableName);
3599 if (!VTable)
3600 VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3601
3602 CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3603
3604 llvm::Type *PtrDiffTy =
3605 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3606
3607 // The vtable address point is 2.
3608 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3609 // The vtable address point is 8 bytes after its start:
3610 // 4 for the offset to top + 4 for the relative offset to rtti.
3611 llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3612 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3613 VTable =
3614 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3615 } else {
3616 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3617 VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
3618 Two);
3619 }
3620 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3621
3622 Fields.push_back(VTable);
3623}
3624
3625/// Return the linkage that the type info and type info name constants
3626/// should have for the given type.
3627static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3628 QualType Ty) {
3629 // Itanium C++ ABI 2.9.5p7:
3630 // In addition, it and all of the intermediate abi::__pointer_type_info
3631 // structs in the chain down to the abi::__class_type_info for the
3632 // incomplete class type must be prevented from resolving to the
3633 // corresponding type_info structs for the complete class type, possibly
3634 // by making them local static objects. Finally, a dummy class RTTI is
3635 // generated for the incomplete type that will not resolve to the final
3636 // complete class RTTI (because the latter need not exist), possibly by
3637 // making it a local static object.
3638 if (ContainsIncompleteClassType(Ty))
3639 return llvm::GlobalValue::InternalLinkage;
3640
3641 switch (Ty->getLinkage()) {
3642 case NoLinkage:
3643 case InternalLinkage:
3644 case UniqueExternalLinkage:
3645 return llvm::GlobalValue::InternalLinkage;
3646
3647 case VisibleNoLinkage:
3648 case ModuleLinkage:
3649 case ExternalLinkage:
3650 // RTTI is not enabled, which means that this type info struct is going
3651 // to be used for exception handling. Give it linkonce_odr linkage.
3652 if (!CGM.getLangOpts().RTTI)
3653 return llvm::GlobalValue::LinkOnceODRLinkage;
3654
3655 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3656 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3657 if (RD->hasAttr<WeakAttr>())
3658 return llvm::GlobalValue::WeakODRLinkage;
3659 if (CGM.getTriple().isWindowsItaniumEnvironment())
3660 if (RD->hasAttr<DLLImportAttr>() &&
3661 ShouldUseExternalRTTIDescriptor(CGM, Ty))
3662 return llvm::GlobalValue::ExternalLinkage;
3663 // MinGW always uses LinkOnceODRLinkage for type info.
3664 if (RD->isDynamicClass() &&
3665 !CGM.getContext()
3666 .getTargetInfo()
3667 .getTriple()
3668 .isWindowsGNUEnvironment())
3669 return CGM.getVTableLinkage(RD);
3670 }
3671
3672 return llvm::GlobalValue::LinkOnceODRLinkage;
3673 }
3674
3675 llvm_unreachable("Invalid linkage!");
3676}
3677
3678llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3679 // We want to operate on the canonical type.
3680 Ty = Ty.getCanonicalType();
3681
3682 // Check if we've already emitted an RTTI descriptor for this type.
3683 SmallString<256> Name;
3684 llvm::raw_svector_ostream Out(Name);
3685 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3686
3687 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3688 if (OldGV && !OldGV->isDeclaration()) {
3689 assert(!OldGV->hasAvailableExternallyLinkage() &&
3690 "available_externally typeinfos not yet implemented");
3691
3692 return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3693 }
3694
3695 // Check if there is already an external RTTI descriptor for this type.
3696 if (IsStandardLibraryRTTIDescriptor(Ty) ||
3697 ShouldUseExternalRTTIDescriptor(CGM, Ty))
3698 return GetAddrOfExternalRTTIDescriptor(Ty);
3699
3700 // Emit the standard library with external linkage.
3701 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3702
3703 // Give the type_info object and name the formal visibility of the
3704 // type itself.
3705 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3706 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3707 // If the linkage is local, only default visibility makes sense.
3708 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3709 else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3710 ItaniumCXXABI::RUK_NonUniqueHidden)
3711 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3712 else
3713 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3714
3715 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3716 llvm::GlobalValue::DefaultStorageClass;
3717 if (auto RD = Ty->getAsCXXRecordDecl()) {
3718 if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
3719 RD->hasAttr<DLLExportAttr>()) ||
3720 (CGM.shouldMapVisibilityToDLLExport(RD) &&
3721 !llvm::GlobalValue::isLocalLinkage(Linkage) &&
3722 llvmVisibility == llvm::GlobalValue::DefaultVisibility))
3723 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3724 }
3725 return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3726}
3727
3728llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3729 QualType Ty,
3730 llvm::GlobalVariable::LinkageTypes Linkage,
3731 llvm::GlobalValue::VisibilityTypes Visibility,
3732 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3733 // Add the vtable pointer.
3734 BuildVTablePointer(cast<Type>(Ty));
3735
3736 // And the name.
3737 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3738 llvm::Constant *TypeNameField;
3739
3740 // If we're supposed to demote the visibility, be sure to set a flag
3741 // to use a string comparison for type_info comparisons.
3742 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3743 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3744 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3745 // The flag is the sign bit, which on ARM64 is defined to be clear
3746 // for global pointers. This is very ARM64-specific.
3747 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3748 llvm::Constant *flag =
3749 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3750 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3751 TypeNameField =
3752 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3753 } else {
3754 TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3755 }
3756 Fields.push_back(TypeNameField);
3757
3758 switch (Ty->getTypeClass()) {
3759#define TYPE(Class, Base)
3760#define ABSTRACT_TYPE(Class, Base)
3761#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3762#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3763#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3764#include "clang/AST/TypeNodes.inc"
3765 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3766
3767 // GCC treats vector types as fundamental types.
3768 case Type::Builtin:
3769 case Type::Vector:
3770 case Type::ExtVector:
3771 case Type::ConstantMatrix:
3772 case Type::Complex:
3773 case Type::BlockPointer:
3774 // Itanium C++ ABI 2.9.5p4:
3775 // abi::__fundamental_type_info adds no data members to std::type_info.
3776 break;
3777
3778 case Type::LValueReference:
3779 case Type::RValueReference:
3780 llvm_unreachable("References shouldn't get here");
3781
3782 case Type::Auto:
3783 case Type::DeducedTemplateSpecialization:
3784 llvm_unreachable("Undeduced type shouldn't get here");
3785
3786 case Type::Pipe:
3787 break;
3788
3789 case Type::BitInt:
3790 break;
3791
3792 case Type::ConstantArray:
3793 case Type::IncompleteArray:
3794 case Type::VariableArray:
3795 // Itanium C++ ABI 2.9.5p5:
3796 // abi::__array_type_info adds no data members to std::type_info.
3797 break;
3798
3799 case Type::FunctionNoProto:
3800 case Type::FunctionProto:
3801 // Itanium C++ ABI 2.9.5p5:
3802 // abi::__function_type_info adds no data members to std::type_info.
3803 break;
3804
3805 case Type::Enum:
3806 // Itanium C++ ABI 2.9.5p5:
3807 // abi::__enum_type_info adds no data members to std::type_info.
3808 break;
3809
3810 case Type::Record: {
3811 const CXXRecordDecl *RD =
3812 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3813 if (!RD->hasDefinition() || !RD->getNumBases()) {
3814 // We don't need to emit any fields.
3815 break;
3816 }
3817
3818 if (CanUseSingleInheritance(RD))
3819 BuildSIClassTypeInfo(RD);
3820 else
3821 BuildVMIClassTypeInfo(RD);
3822
3823 break;
3824 }
3825
3826 case Type::ObjCObject:
3827 case Type::ObjCInterface:
3828 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3829 break;
3830
3831 case Type::ObjCObjectPointer:
3832 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3833 break;
3834
3835 case Type::Pointer:
3836 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3837 break;
3838
3839 case Type::MemberPointer:
3840 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3841 break;
3842
3843 case Type::Atomic:
3844 // No fields, at least for the moment.
3845 break;
3846 }
3847
3848 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3849
3850 SmallString<256> Name;
3851 llvm::raw_svector_ostream Out(Name);
3852 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3853 llvm::Module &M = CGM.getModule();
3854 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3855 llvm::GlobalVariable *GV =
3856 new llvm::GlobalVariable(M, Init->getType(),
3857 /*isConstant=*/true, Linkage, Init, Name);
3858
3859 // Export the typeinfo in the same circumstances as the vtable is exported.
3860 auto GVDLLStorageClass = DLLStorageClass;
3861 if (CGM.getTarget().hasPS4DLLImportExport()) {
3862 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3863 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3864 if (RD->hasAttr<DLLExportAttr>() ||
3865 CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
3866 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
3867 }
3868 }
3869 }
3870
3871 // If there's already an old global variable, replace it with the new one.
3872 if (OldGV) {
3873 GV->takeName(OldGV);
3874 llvm::Constant *NewPtr =
3875 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3876 OldGV->replaceAllUsesWith(NewPtr);
3877 OldGV->eraseFromParent();
3878 }
3879
3880 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3881 GV->setComdat(M.getOrInsertComdat(GV->getName()));
3882
3885 GV->setAlignment(Align.getAsAlign());
3886
3887 // The Itanium ABI specifies that type_info objects must be globally
3888 // unique, with one exception: if the type is an incomplete class
3889 // type or a (possibly indirect) pointer to one. That exception
3890 // affects the general case of comparing type_info objects produced
3891 // by the typeid operator, which is why the comparison operators on
3892 // std::type_info generally use the type_info name pointers instead
3893 // of the object addresses. However, the language's built-in uses
3894 // of RTTI generally require class types to be complete, even when
3895 // manipulating pointers to those class types. This allows the
3896 // implementation of dynamic_cast to rely on address equality tests,
3897 // which is much faster.
3898
3899 // All of this is to say that it's important that both the type_info
3900 // object and the type_info name be uniqued when weakly emitted.
3901
3902 TypeName->setVisibility(Visibility);
3903 CGM.setDSOLocal(TypeName);
3904
3905 GV->setVisibility(Visibility);
3906 CGM.setDSOLocal(GV);
3907
3908 TypeName->setDLLStorageClass(DLLStorageClass);
3909 GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
3910 ? GVDLLStorageClass
3911 : DLLStorageClass);
3912
3913 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3914 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3915
3916 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3917}
3918
3919/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3920/// for the given Objective-C object type.
3921void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3922 // Drop qualifiers.
3923 const Type *T = OT->getBaseType().getTypePtr();
3924 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3925
3926 // The builtin types are abi::__class_type_infos and don't require
3927 // extra fields.
3928 if (isa<BuiltinType>(T)) return;
3929
3930 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3931 ObjCInterfaceDecl *Super = Class->getSuperClass();
3932
3933 // Root classes are also __class_type_info.
3934 if (!Super) return;
3935
3936 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3937
3938 // Everything else is single inheritance.
3939 llvm::Constant *BaseTypeInfo =
3940 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3941 Fields.push_back(BaseTypeInfo);
3942}
3943
3944/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3945 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3946void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3947 // Itanium C++ ABI 2.9.5p6b:
3948 // It adds to abi::__class_type_info a single member pointing to the
3949 // type_info structure for the base type,
3950 llvm::Constant *BaseTypeInfo =
3951 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3952 Fields.push_back(BaseTypeInfo);
3953}
3954
3955namespace {
3956 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3957 /// a class hierarchy.
3958 struct SeenBases {
3959 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3960 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3961 };
3962}
3963
3964/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3965/// abi::__vmi_class_type_info.
3966///
3967 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3968 SeenBases &Bases) {
3969
3970 unsigned Flags = 0;
3971
3972 auto *BaseDecl =
3973 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3974
3975 if (Base->isVirtual()) {
3976 // Mark the virtual base as seen.
3977 if (!Bases.VirtualBases.insert(BaseDecl).second) {
3978 // If this virtual base has been seen before, then the class is diamond
3979 // shaped.
3980 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3981 } else {
3982 if (Bases.NonVirtualBases.count(BaseDecl))
3983 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3984 }
3985 } else {
3986 // Mark the non-virtual base as seen.
3987 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3988 // If this non-virtual base has been seen before, then the class has non-
3989 // diamond shaped repeated inheritance.
3990 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3991 } else {
3992 if (Bases.VirtualBases.count(BaseDecl))
3993 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3994 }
3995 }
3996
3997 // Walk all bases.
3998 for (const auto &I : BaseDecl->bases())
3999 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4000
4001 return Flags;
4002}
4003
4004 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
4005 unsigned Flags = 0;
4006 SeenBases Bases;
4007
4008 // Walk all bases.
4009 for (const auto &I : RD->bases())
4010 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4011
4012 return Flags;
4013}
4014
4015/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4016/// classes with bases that do not satisfy the abi::__si_class_type_info
4017/// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
4018void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4019 llvm::Type *UnsignedIntLTy =
4020 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4021
4022 // Itanium C++ ABI 2.9.5p6c:
4023 // __flags is a word with flags describing details about the class
4024 // structure, which may be referenced by using the __flags_masks
4025 // enumeration. These flags refer to both direct and indirect bases.
4026 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4027 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4028
4029 // Itanium C++ ABI 2.9.5p6c:
4030 // __base_count is a word with the number of direct proper base class
4031 // descriptions that follow.
4032 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4033
4034 if (!RD->getNumBases())
4035 return;
4036
4037 // Now add the base class descriptions.
4038
4039 // Itanium C++ ABI 2.9.5p6c:
4040 // __base_info[] is an array of base class descriptions -- one for every
4041 // direct proper base. Each description is of the type:
4042 //
4043 // struct abi::__base_class_type_info {
4044 // public:
4045 // const __class_type_info *__base_type;
4046 // long __offset_flags;
4047 //
4048 // enum __offset_flags_masks {
4049 // __virtual_mask = 0x1,
4050 // __public_mask = 0x2,
4051 // __offset_shift = 8
4052 // };
4053 // };
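 // Illustrative example (added annotation, not in the original source): a
 // public, non-virtual base laid out at byte offset 16 is described by
 // __offset_flags == (16 << 8) | __public_mask == 0x1002.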
4054
4055 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4056 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4057 // LLP64 platforms.
4058 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4059 // LLP64 platforms.
4060 QualType OffsetFlagsTy = CGM.getContext().LongTy;
4061 const TargetInfo &TI = CGM.getContext().getTargetInfo();
4062 if (TI.getTriple().isOSCygMing() &&
4063 TI.getPointerWidth(LangAS::Default) > TI.getLongWidth())
4064 OffsetFlagsTy = CGM.getContext().LongLongTy;
4065 llvm::Type *OffsetFlagsLTy =
4066 CGM.getTypes().ConvertType(OffsetFlagsTy);
4067
4068 for (const auto &Base : RD->bases()) {
4069 // The __base_type member points to the RTTI for the base type.
4070 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4071
4072 auto *BaseDecl =
4073 cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4074
4075 int64_t OffsetFlags = 0;
4076
4077 // All but the lower 8 bits of __offset_flags are a signed offset.
4078 // For a non-virtual base, this is the offset in the object of the base
4079 // subobject. For a virtual base, this is the offset in the virtual table of
4080 // the virtual base offset for the virtual base referenced (negative).
4081 CharUnits Offset;
4082 if (Base.isVirtual())
4083 Offset =
4084 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4085 else {
4086 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4087 Offset = Layout.getBaseClassOffset(BaseDecl);
4088 };
4089
4090 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4091
4092 // The low-order byte of __offset_flags contains flags, as given by the
4093 // masks from the enumeration __offset_flags_masks.
4094 if (Base.isVirtual())
4095 OffsetFlags |= BCTI_Virtual;
4096 if (Base.getAccessSpecifier() == AS_public)
4097 OffsetFlags |= BCTI_Public;
4098
4099 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4100 }
4101}
4102
4103/// Compute the flags for a __pbase_type_info, and remove the corresponding
4104/// pieces from \p Type.
4105 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4106 unsigned Flags = 0;
4107
4108 if (Type.isConstQualified())
4109 Flags |= ItaniumRTTIBuilder::PTI_Const;
4110 if (Type.isVolatileQualified())
4111 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4112 if (Type.isRestrictQualified())
4113 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4114 Type = Type.getUnqualifiedType();
4115
4116 // Itanium C++ ABI 2.9.5p7:
4117 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4118 // incomplete class type, the incomplete target type flag is set.
4119 if (ContainsIncompleteClassType(Type))
4120 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4121
4122 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4123 if (Proto->isNothrow()) {
4124 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4125 Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4126 }
4127 }
4128
4129 return Flags;
4130}
4131
4132/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4133/// used for pointer types.
4134void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4135 // Itanium C++ ABI 2.9.5p7:
4136 // __flags is a flag word describing the cv-qualification and other
4137 // attributes of the type pointed to
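 // Illustrative example (added annotation, not in the original source): for
 // "const volatile int *", the pointee is "const volatile int", so __flags is
 // PTI_Const | PTI_Volatile == 0x3.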
4138 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4139
4140 llvm::Type *UnsignedIntLTy =
4141 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4142 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4143
4144 // Itanium C++ ABI 2.9.5p7:
4145 // __pointee is a pointer to the std::type_info derivation for the
4146 // unqualified type being pointed to.
4147 llvm::Constant *PointeeTypeInfo =
4148 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4149 Fields.push_back(PointeeTypeInfo);
4150}
4151
4152/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4153/// struct, used for member pointer types.
4154void
4155ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4156 QualType PointeeTy = Ty->getPointeeType();
4157
4158 // Itanium C++ ABI 2.9.5p7:
4159 // __flags is a flag word describing the cv-qualification and other
4160 // attributes of the type pointed to.
4161 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4162
4163 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4164 if (IsIncompleteClassType(ClassType))
4165 Flags |= PTI_ContainingClassIncomplete;
4166
4167 llvm::Type *UnsignedIntLTy =
4168 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4169 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4170
4171 // Itanium C++ ABI 2.9.5p7:
4172 // __pointee is a pointer to the std::type_info derivation for the
4173 // unqualified type being pointed to.
4174 llvm::Constant *PointeeTypeInfo =
4175 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4176 Fields.push_back(PointeeTypeInfo);
4177
4178 // Itanium C++ ABI 2.9.5p9:
4179 // __context is a pointer to an abi::__class_type_info corresponding to the
4180 // class type containing the member pointed to
4181 // (e.g., the "A" in "int A::*").
4182 Fields.push_back(
4183 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4184}
4185
4186llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4187 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4188}
4189
4190void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4191 // Types added here must also be added to TypeInfoIsInStandardLibrary.
4192 QualType FundamentalTypes[] = {
4193 getContext().VoidTy, getContext().NullPtrTy,
4194 getContext().BoolTy, getContext().WCharTy,
4195 getContext().CharTy, getContext().UnsignedCharTy,
4196 getContext().SignedCharTy, getContext().ShortTy,
4197 getContext().UnsignedShortTy, getContext().IntTy,
4198 getContext().UnsignedIntTy, getContext().LongTy,
4199 getContext().UnsignedLongTy, getContext().LongLongTy,
4200 getContext().UnsignedLongLongTy, getContext().Int128Ty,
4201 getContext().UnsignedInt128Ty, getContext().HalfTy,
4202 getContext().FloatTy, getContext().DoubleTy,
4203 getContext().LongDoubleTy, getContext().Float128Ty,
4204 getContext().Char8Ty, getContext().Char16Ty,
4205 getContext().Char32Ty
4206 };
4207 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4208 RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
4209 ? llvm::GlobalValue::DLLExportStorageClass
4210 : llvm::GlobalValue::DefaultStorageClass;
4211 llvm::GlobalValue::VisibilityTypes Visibility =
4212 CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4213 for (const QualType &FundamentalType : FundamentalTypes) {
4214 QualType PointerType = getContext().getPointerType(FundamentalType);
4215 QualType PointerTypeConst = getContext().getPointerType(
4216 FundamentalType.withConst());
4217 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4218 ItaniumRTTIBuilder(*this).BuildTypeInfo(
4219 Type, llvm::GlobalValue::ExternalLinkage,
4220 Visibility, DLLStorageClass);
4221 }
4222}
4223
4224/// What sort of uniqueness rules should we use for the RTTI for the
4225/// given type?
4226ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4227 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4228 if (shouldRTTIBeUnique())
4229 return RUK_Unique;
4230
4231 // It's only necessary for linkonce_odr or weak_odr linkage.
4232 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4233 Linkage != llvm::GlobalValue::WeakODRLinkage)
4234 return RUK_Unique;
4235
4236 // It's only necessary with default visibility.
4237 if (CanTy->getVisibility() != DefaultVisibility)
4238 return RUK_Unique;
4239
4240 // If we're not required to publish this symbol, hide it.
4241 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4242 return RUK_NonUniqueHidden;
4243
4244 // If we're required to publish this symbol, as we might be under an
4245 // explicit instantiation, leave it with default visibility but
4246 // enable string-comparisons.
4247 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4248 return RUK_NonUniqueVisible;
4249}
4250
4251// Find out how to codegen the complete destructor and constructor
4252namespace {
4253enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4254}
4255static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4256 const CXXMethodDecl *MD) {
4257 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4258 return StructorCodegen::Emit;
4259
4260 // The complete and base structors are not equivalent if there are any virtual
4261 // bases, so emit separate functions.
4262 if (MD->getParent()->getNumVBases())
4263 return StructorCodegen::Emit;
4264
4265 GlobalDecl AliasDecl;
4266 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4267 AliasDecl = GlobalDecl(DD, Dtor_Complete);
4268 } else {
4269 const auto *CD = cast<CXXConstructorDecl>(MD);
4270 AliasDecl = GlobalDecl(CD, Ctor_Complete);
4271 }
4272 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4273
4274 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4275 return StructorCodegen::RAUW;
4276
4277 // FIXME: Should we allow available_externally aliases?
4278 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4279 return StructorCodegen::RAUW;
4280
4281 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4282 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4283 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4284 CGM.getTarget().getTriple().isOSBinFormatWasm())
4285 return StructorCodegen::COMDAT;
4286 return StructorCodegen::Emit;
4287 }
4288
4289 return StructorCodegen::Alias;
4290}
4291
4294 GlobalDecl TargetDecl) {
4295 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4296
4297 StringRef MangledName = CGM.getMangledName(AliasDecl);
4298 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
4299 if (Entry && !Entry->isDeclaration())
4300 return;
4301
4302 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4303
4304 // Create the alias with no name.
4305 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4306
4307 // Constructors and destructors are always unnamed_addr.
4308 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4309
4310 // Switch any previous uses to the alias.
4311 if (Entry) {
4312 assert(Entry->getType() == Aliasee->getType() &&
4313 "declaration exists with different type");
4314 Alias->takeName(Entry);
4315 Entry->replaceAllUsesWith(Alias);
4316 Entry->eraseFromParent();
4317 } else {
4318 Alias->setName(MangledName);
4319 }
4320
4321 // Finally, set up the alias with its proper name and attributes.
4322 CGM.SetCommonAttributes(AliasDecl, Alias);
4323}
4324
4325void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4326 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4327 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4328 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4329
4330 StructorCodegen CGType = getCodegenToUse(CGM, MD);
4331
4332 if (CD ? GD.getCtorType() == Ctor_Complete
4333 : GD.getDtorType() == Dtor_Complete) {
4334 GlobalDecl BaseDecl;
4335 if (CD)
4336 BaseDecl = GD.getWithCtorType(Ctor_Base);
4337 else
4338 BaseDecl = GD.getWithDtorType(Dtor_Base);
4339
4340 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4341 emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4342 return;
4343 }
4344
4345 if (CGType == StructorCodegen::RAUW) {
4346 StringRef MangledName = CGM.getMangledName(GD);
4347 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4348 CGM.addReplacement(MangledName, Aliasee);
4349 return;
4350 }
4351 }
4352
4353 // The base destructor is equivalent to the base destructor of its
4354 // base class if there is exactly one non-virtual base class with a
4355 // non-trivial destructor, there are no fields with a non-trivial
4356 // destructor, and the body of the destructor is trivial.
4357 if (DD && GD.getDtorType() == Dtor_Base &&
4358 CGType != StructorCodegen::COMDAT &&
4359 !CGM.TryEmitBaseDestructorAsAlias(DD))
4360 return;
4361
4362 // FIXME: The deleting destructor is equivalent to the selected operator
4363 // delete if:
4364 // * either the delete is a destroying operator delete or the destructor
4365 // would be trivial if it weren't virtual,
4366 // * the conversion from the 'this' parameter to the first parameter of the
4367 // destructor is equivalent to a bitcast,
4368 // * the destructor does not have an implicit "this" return, and
4369 // * the operator delete has the same calling convention and IR function type
4370 // as the destructor.
4371 // In such cases we should try to emit the deleting dtor as an alias to the
4372 // selected 'operator delete'.
4373
4374 llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4375
4376 if (CGType == StructorCodegen::COMDAT) {
4377 SmallString<256> Buffer;
4378 llvm::raw_svector_ostream Out(Buffer);
4379 if (DD)
4380 getMangleContext().mangleCXXDtorComdat(DD, Out);
4381 else
4382 getMangleContext().mangleCXXCtorComdat(CD, Out);
4383 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4384 Fn->setComdat(C);
4385 } else {
4386 CGM.maybeSetTrivialComdat(*MD, *Fn);
4387 }
4388}
4389
4390static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4391 // void *__cxa_begin_catch(void*);
4392 llvm::FunctionType *FTy = llvm::FunctionType::get(
4393 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4394
4395 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4396}
4397
4398static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4399 // void __cxa_end_catch();
4400 llvm::FunctionType *FTy =
4401 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4402
4403 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4404}
4405
4406static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4407 // void *__cxa_get_exception_ptr(void*);
4408 llvm::FunctionType *FTy = llvm::FunctionType::get(
4409 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4410
4411 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4412}
4413
4414namespace {
4415 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4416 /// exception type lets us state definitively that the thrown exception
4417 /// type does not have a destructor. In particular:
4418 /// - Catch-alls tell us nothing, so we have to conservatively
4419 /// assume that the thrown exception might have a destructor.
4420 /// - Catches by reference behave according to their base types.
4421 /// - Catches of non-record types will only trigger for exceptions
4422 /// of non-record types, which never have destructors.
4423 /// - Catches of record types can trigger for arbitrary subclasses
4424 /// of the caught type, so we have to assume the actual thrown
4425 /// exception type might have a throwing destructor, even if the
4426 /// caught type's destructor is trivial or nothrow.
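 // Illustrative example (added annotation, not in the original source):
 // "catch (int)" never needs a protected end-catch, while
 // "catch (const std::exception &)" or "catch (...)" must assume the thrown
 // object's destructor could throw.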
4427 struct CallEndCatch final : EHScopeStack::Cleanup {
4428 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4429 bool MightThrow;
4430
4431 void Emit(CodeGenFunction &CGF, Flags flags) override {
4432 if (!MightThrow) {
4433 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4434 return;
4435 }
4436
4437 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4438 }
4439 };
4440}
4441
4442/// Emits a call to __cxa_begin_catch and enters a cleanup to call
4443/// __cxa_end_catch.
4444///
4445/// \param EndMightThrow - true if __cxa_end_catch might throw
4446static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4447 llvm::Value *Exn,
4448 bool EndMightThrow) {
4449 llvm::CallInst *call =
4451
4452 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4453
4454 return call;
4455}
4456
4457/// A "special initializer" callback for initializing a catch
4458/// parameter during catch initialization.
4460 static void InitCatchParam(CodeGenFunction &CGF,
4461 const VarDecl &CatchParam,
4461 Address ParamAddr,
4462 SourceLocation Loc) {
4463 // Load the exception from where the landing pad saved it.
4464 llvm::Value *Exn = CGF.getExceptionFromSlot();
4465
4466 CanQualType CatchType =
4467 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4468 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4469
4470 // If we're catching by reference, we can just cast the object
4471 // pointer to the appropriate pointer.
4472 if (isa<ReferenceType>(CatchType)) {
4473 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4474 bool EndCatchMightThrow = CaughtType->isRecordType();
4475
4476 // __cxa_begin_catch returns the adjusted object pointer.
4477 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4478
4479 // We have no way to tell the personality function that we're
4480 // catching by reference, so if we're catching a pointer,
4481 // __cxa_begin_catch will actually return that pointer by value.
4482 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4483 QualType PointeeType = PT->getPointeeType();
4484
4485 // When catching by reference, generally we should just ignore
4486 // this by-value pointer and use the exception object instead.
4487 if (!PointeeType->isRecordType()) {
4488
4489 // Exn points to the struct _Unwind_Exception header, which
4490 // we have to skip past in order to reach the exception data.
4491 unsigned HeaderSize =
4492 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4493 AdjustedExn =
4494 CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
4495
4496 // However, if we're catching a pointer-to-record type that won't
4497 // work, because the personality function might have adjusted
4498 // the pointer. There's actually no way for us to fully satisfy
4499 // the language/ABI contract here: we can't use Exn because it
4500 // might have the wrong adjustment, but we can't use the by-value
4501 // pointer because it's off by a level of abstraction.
4502 //
4503 // The current solution is to dump the adjusted pointer into an
4504 // alloca, which breaks language semantics (because changing the
4505 // pointer doesn't change the exception) but at least works.
4506 // The better solution would be to filter out non-exact matches
4507 // and rethrow them, but this is tricky because the rethrow
4508 // really needs to be catchable by other sites at this landing
4509 // pad. The best solution is to fix the personality function.
4510 } else {
4511 // Pull the pointer for the reference type off.
4512 llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);
4513
4514 // Create the temporary and write the adjusted pointer into it.
4515 Address ExnPtrTmp =
4516 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4517 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4518 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4519
4520 // Bind the reference to the temporary.
4521 AdjustedExn = ExnPtrTmp.getPointer();
4522 }
4523 }
4524
4525 llvm::Value *ExnCast =
4526 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4527 CGF.Builder.CreateStore(ExnCast, ParamAddr);
4528 return;
4529 }
4530
4531 // Scalars and complexes.
4532 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4533 if (TEK != TEK_Aggregate) {
4534 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4535
4536 // If the catch type is a pointer type, __cxa_begin_catch returns
4537 // the pointer by value.
4538 if (CatchType->hasPointerRepresentation()) {
4539 llvm::Value *CastExn =
4540 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4541
4542 switch (CatchType.getQualifiers().getObjCLifetime()) {
4543 case Qualifiers::OCL_Strong:
4544 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4545 [[fallthrough]];
4546
4547 case Qualifiers::OCL_None:
4548 case Qualifiers::OCL_ExplicitNone:
4549 case Qualifiers::OCL_Autoreleasing:
4550 CGF.Builder.CreateStore(CastExn, ParamAddr);
4551 return;
4552
4553 case Qualifiers::OCL_Weak:
4554 CGF.EmitARCInitWeak(ParamAddr, CastExn);
4555 return;
4556 }
4557 llvm_unreachable("bad ownership qualifier!");
4558 }
4559
4560 // Otherwise, it returns a pointer into the exception object.
4561
4562 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4563 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4564
4565 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
4566 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4567 switch (TEK) {
4568 case TEK_Complex:
4569 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4570 /*init*/ true);
4571 return;
4572 case TEK_Scalar: {
4573 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4574 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4575 return;
4576 }
4577 case TEK_Aggregate:
4578 llvm_unreachable("evaluation kind filtered out!");
4579 }
4580 llvm_unreachable("bad evaluation kind");
4581 }
4582
4583 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4584 auto catchRD = CatchType->getAsCXXRecordDecl();
4585 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4586
4587 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4588
4589 // Check for a copy expression. If we don't have a copy expression,
4590 // that means a trivial copy is okay.
4591 const Expr *copyExpr = CatchParam.getInit();
4592 if (!copyExpr) {
4593 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4594 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4595 LLVMCatchTy, caughtExnAlignment);
4596 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4597 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4598 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4599 return;
4600 }
4601
4602 // We have to call __cxa_get_exception_ptr to get the adjusted
4603 // pointer before copying.
4604 llvm::CallInst *rawAdjustedExn =
4605 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4606
4607 // Cast that to the appropriate type.
4608 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4609 LLVMCatchTy, caughtExnAlignment);
4610
4611 // The copy expression is defined in terms of an OpaqueValueExpr.
4612 // Find it and map it to the adjusted expression.
4613 CodeGenFunction::OpaqueValueMapping
4614 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4615 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4616
4617 // Call the copy ctor in a terminate scope.
4618 CGF.EHStack.pushTerminate();
4619
4620 // Perform the copy construction.
4621 CGF.EmitAggExpr(copyExpr,
4622 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4623 AggValueSlot::IsNotDestructed,
4624 AggValueSlot::DoesNotNeedGCBarriers,
4625 AggValueSlot::IsNotAliased,
4626 AggValueSlot::DoesNotOverlap));
4627
4628 // Leave the terminate scope.
4629 CGF.EHStack.popTerminate();
4630
4631 // Undo the opaque value mapping.
4632 opaque.pop();
4633
4634 // Finally we can call __cxa_begin_catch.
4635 CallBeginCatch(CGF, Exn, true);
4636}
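// --- Editor's illustrative sketch (not part of ItaniumCXXABI.cpp) ---
// The "catch a pointer by reference" path in InitCatchParam above is driven by
// source like the following (all names are placeholders). As the long comment
// above notes, 'p' is bound to a temporary alloca holding the adjusted pointer,
// so assigning through the reference does not modify the in-flight exception.
namespace catch_ptr_by_ref_sketch {
inline int Target = 0;
inline void demo() {
  try {
    throw static_cast<int *>(nullptr); // exception object is an 'int *'
  } catch (int *&p) {                  // non-record pointee: takes the alloca path
    p = &Target;                       // rebinds the temporary, not the exception
  }
}
} // namespace catch_ptr_by_ref_sketch
// ---------------------------------------------------------------------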
4637
4638/// Begins a catch statement by initializing the catch variable and
4639/// calling __cxa_begin_catch.
4640void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4641 const CXXCatchStmt *S) {
4642 // We have to be very careful with the ordering of cleanups here:
4643 // C++ [except.throw]p4:
4644 // The destruction [of the exception temporary] occurs
4645 // immediately after the destruction of the object declared in
4646 // the exception-declaration in the handler.
4647 //
4648 // So the precise ordering is:
4649 // 1. Construct catch variable.
4650 // 2. __cxa_begin_catch
4651 // 3. Enter __cxa_end_catch cleanup
4652 // 4. Enter dtor cleanup
4653 //
4654 // We do this by using a slightly abnormal initialization process.
4655 // Delegation sequence:
4656 // - ExitCXXTryStmt opens a RunCleanupsScope
4657 // - EmitAutoVarAlloca creates the variable and debug info
4658 // - InitCatchParam initializes the variable from the exception
4659 // - CallBeginCatch calls __cxa_begin_catch
4660 // - CallBeginCatch enters the __cxa_end_catch cleanup
4661 // - EmitAutoVarCleanups enters the variable destructor cleanup
4662 // - EmitCXXTryStmt emits the code for the catch body
4663 // - EmitCXXTryStmt closes the RunCleanupsScope
4664
4665 VarDecl *CatchParam = S->getExceptionDecl();
4666 if (!CatchParam) {
4667 llvm::Value *Exn = CGF.getExceptionFromSlot();
4668 CallBeginCatch(CGF, Exn, true);
4669 return;
4670 }
4671
4672 // Emit the local.
4673 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4674 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4675 CGF.EmitAutoVarCleanups(var);
4676}
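// --- Editor's illustrative sketch (not part of ItaniumCXXABI.cpp) ---
// A by-value class-type handler that exercises the ordering documented above.
// 'Widget' and 'demo' are placeholder names; the numbered comments map onto
// the four steps listed in emitBeginCatch.
namespace catch_ordering_sketch {
struct Widget {
  Widget() = default;
  Widget(const Widget &) = default; // (1)+(2): storage created, then copy-init
  ~Widget() {}                      //          from the exception via __cxa_begin_catch
};
inline void demo() {
  try {
    throw Widget();
  } catch (Widget w) { // (3): the ~Widget cleanup is entered after initialization
    (void)w;
  } // (4): cleanups unwind in reverse: ~Widget runs first, then __cxa_end_catch
    //      destroys the exception temporary, per C++ [except.throw]p4
}
} // namespace catch_ordering_sketch
// ---------------------------------------------------------------------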
4677
4678/// Get or define the following function:
4679/// void @__clang_call_terminate(i8* %exn) nounwind noreturn
4680/// This code is used only in C++.
4681static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4682 ASTContext &C = CGM.getContext();
4683 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
4684 C.VoidTy, {C.getPointerType(C.CharTy)});
4685 llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI);
4686 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4687 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4688 llvm::Function *fn =
4689 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4690 if (fn->empty()) {
4691 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false);
4692 fn->setDoesNotThrow();
4693 fn->setDoesNotReturn();
4694
4695 // What we really want is to massively penalize inlining without
4696 // forbidding it completely. The difference between that and
4697 // 'noinline' is negligible.
4698 fn->addFnAttr(llvm::Attribute::NoInline);
4699
4700 // Allow this function to be shared across translation units, but
4701 // we don't want it to turn into an exported symbol.
4702 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4703 fn->setVisibility(llvm::Function::HiddenVisibility);
4704 if (CGM.supportsCOMDAT())
4705 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4706
4707 // Set up the function.
4708 llvm::BasicBlock *entry =
4709 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4710 CGBuilderTy builder(CGM, entry);
4711
4712 // Pull the exception pointer out of the parameter list.
4713 llvm::Value *exn = &*fn->arg_begin();
4714
4715 // Call __cxa_begin_catch(exn).
4716 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4717 catchCall->setDoesNotThrow();
4718 catchCall->setCallingConv(CGM.getRuntimeCC());
4719
4720 // Call std::terminate().
4721 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4722 termCall->setDoesNotThrow();
4723 termCall->setDoesNotReturn();
4724 termCall->setCallingConv(CGM.getRuntimeCC());
4725
4726 // std::terminate cannot return.
4727 builder.CreateUnreachable();
4728 }
4729 return fnRef;
4730}
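// --- Editor's illustrative sketch (not part of ItaniumCXXABI.cpp) ---
// The helper emitted above is morally equivalent to the following hand-written
// C++ (shown in a comment so it does not redeclare the real runtime symbols):
//
//   extern "C" void __clang_call_terminate(void *exn) {
//     __cxa_begin_catch(exn); // mark the exception handled; does not throw here
//     std::terminate();       // noreturn
//   }
//
// i.e. it differs from calling std::terminate() directly only in that the
// in-flight exception is first marked as handled via __cxa_begin_catch.
// ---------------------------------------------------------------------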
4731
4732llvm::CallInst *
4733ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4734 llvm::Value *Exn) {
4735 // In C++, we want to call __cxa_begin_catch() before terminating.
4736 if (Exn) {
4737 assert(CGF.CGM.getLangOpts().CPlusPlus);
4738 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4739 }
4740 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4741}
4742
4743std::pair<llvm::Value *, const CXXRecordDecl *>
4744ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4745 const CXXRecordDecl *RD) {
4746 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4747}
4748
4749void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4750 const CXXCatchStmt *C) {
4751 if (CGF.getTarget().hasFeature("exception-handling"))
4752 CGF.EHStack.pushCleanup<CatchRetScope>(
4753 NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4754 ItaniumCXXABI::emitBeginCatch(CGF, C);
4755}
4756
4757llvm::CallInst *
4758WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4759 llvm::Value *Exn) {
4760 // The Itanium ABI calls __clang_call_terminate(), which calls __cxa_begin_catch()
4761 // on the violating exception to mark it handled. Because that is currently hard
4762 // to do with the wasm EH instruction structure (catch/catch_all), we just call
4763 // std::terminate and ignore the violating exception, as in CGCXXABI.
4764 // TODO: Consider a code transformation that makes calling __clang_call_terminate
4765 // possible.
4766 return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
4767 }
4768
4769/// Register a global destructor as best as we know how.
4770void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4771 llvm::FunctionCallee Dtor,
4772 llvm::Constant *Addr) {
4773 if (D.getTLSKind() != VarDecl::TLS_None) {
4774 // atexit routine expects "int(*)(int,...)"
4775 llvm::FunctionType *FTy =
4776 llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
4777 llvm::PointerType *FpTy = FTy->getPointerTo();
4778
4779 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4780 llvm::FunctionType *AtExitTy =
4781 llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);
4782
4783 // Fetch the actual function.
4784 llvm::FunctionCallee AtExit =
4785 CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4786
4787 // Create __dtor function for the var decl.
4788 llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4789
4790 // Register above __dtor with atexit().
4791 // First param is flags and must be 0, second param is function ptr
4792 llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4793 CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4794
4795 // Cannot unregister TLS __dtor so done
4796 return;
4797 }
4798
4799 // Create __dtor function for the var decl.
4800 llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4801
4802 // Register above __dtor with atexit().
4803 CGF.registerGlobalDtorWithAtExit(DtorStub);
4804
4805 // Emit __finalize function to unregister __dtor and (as appropriate) call
4806 // __dtor.
4807 emitCXXStermFinalizer(D, DtorStub, Addr);
4808}
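// --- Editor's illustrative sketch (not part of ItaniumCXXABI.cpp) ---
// In source terms, the registration performed by XLCXXABI::registerGlobalDtor
// above corresponds roughly to:
//
//   // non-TLS global: plain atexit registration plus a sterm finalizer
//   atexit(__dtor_stub_for_D);
//
//   // thread_local global: the AIX-specific variadic entry point from the
//   // comment above, with flags == 0; this registration cannot be undone,
//   // so no finalizer is emitted on this path
//   extern "C" int __pt_atexit_np(int flags, int (*fn)(int, ...), ...);
//   __pt_atexit_np(0, __tls_dtor_stub_for_D);
//
// '__dtor_stub_for_D' and '__tls_dtor_stub_for_D' are placeholder names for
// the stubs created by createAtExitStub / createTLSAtExitStub.
// ---------------------------------------------------------------------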
4809
4810void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4811 llvm::Constant *addr) {
4812 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4813 SmallString<256> FnName;
4814 {
4815 llvm::raw_svector_ostream Out(FnName);
4816 getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4817 }
4818
4819 // Create the finalization action associated with a variable.
4820 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
4821 llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4822 FTy, FnName.str(), FI, D.getLocation());
4823
4824 CodeGenFunction CGF(CGM);
4825
4826 CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4827 FunctionArgList(), D.getLocation(),
4828 D.getInit()->getExprLoc());
4829
4830 // The unatexit subroutine unregisters __dtor functions that were previously
4831 // registered by the atexit subroutine. If the referenced function is found,
4832 // unatexit returns 0, meaning that the cleanup is still pending (and we
4833 // should call the __dtor function).
4834 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
4835
4836 llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4837
4838 llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4839 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4840
4841 // Check if unatexit returns a value of 0. If it does, jump to
4842 // DestructCallBlock, otherwise jump to EndBlock directly.
4843 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4844
4845 CGF.EmitBlock(DestructCallBlock);
4846
4847 // Emit the call to dtorStub.
4848 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4849
4850 // Make sure the call and the callee agree on calling convention.
4851 CI->setCallingConv(dtorStub->getCallingConv());
4852
4853 CGF.EmitBlock(EndBlock);
4854
4855 CGF.FinishFunction();
4856
4857 if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
4858 CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
4859 IPA->getPriority());
4860 } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
4861 getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
4862 // According to C++ [basic.start.init]p2, class template static data
4863 // members (i.e., implicitly or explicitly instantiated specializations)
4864 // have unordered initialization. As a consequence, we can put them into
4865 // their own llvm.global_dtors entry.
4866 CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
4867 } else {
4868 CGM.AddCXXStermFinalizerEntry(StermFinalizer);
4869 }
4870}
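// --- Editor's illustrative sketch (not part of ItaniumCXXABI.cpp) ---
// The sterm finalizer generated above behaves like the following pseudo-C++,
// where unatexit() returning 0 means the __dtor stub is still registered and
// therefore still pending (names are placeholders; the real symbol comes from
// mangleDynamicStermFinalizer):
//
//   void __sterm_finalizer_for_D() {
//     if (unatexit(__dtor_stub_for_D) == 0) // still pending?
//       __dtor_stub_for_D();                //   then run the destructor stub
//   }
// ---------------------------------------------------------------------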