ItaniumCXXABI.cpp
1//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides C++ code generation targeting the Itanium C++ ABI. The class
10// in this file generates structures that follow the Itanium C++ ABI, which is
11// documented at:
12// https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14//
15// It also supports the closely-related ARM ABI, documented at:
16// https://developer.arm.com/documentation/ihi0041/g/
17//
18//===----------------------------------------------------------------------===//
19
20#include "CGCXXABI.h"
21#include "CGCleanup.h"
22#include "CGRecordLayout.h"
23#include "CGVTables.h"
24#include "CodeGenFunction.h"
25#include "CodeGenModule.h"
26#include "TargetInfo.h"
27#include "clang/AST/Attr.h"
28#include "clang/AST/Mangle.h"
29#include "clang/AST/StmtCXX.h"
30#include "clang/AST/Type.h"
31#include "clang/CodeGen/ConstantInitBuilder.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/GlobalValue.h"
34#include "llvm/IR/Instructions.h"
35#include "llvm/IR/Intrinsics.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/ScopedPrinter.h"
38
39#include <optional>
40
41using namespace clang;
42using namespace CodeGen;
43
44namespace {
45class ItaniumCXXABI : public CodeGen::CGCXXABI {
46 /// VTables - All the vtables which have been defined.
47 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
48
49 /// All the thread wrapper functions that have been used.
50 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
51 ThreadWrappers;
52
53protected:
54 bool UseARMMethodPtrABI;
55 bool UseARMGuardVarABI;
56 bool Use32BitVTableOffsetABI;
57
58 ItaniumMangleContext &getMangleContext() {
59 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
60 }
61
62public:
63 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
64 bool UseARMMethodPtrABI = false,
65 bool UseARMGuardVarABI = false) :
66 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
67 UseARMGuardVarABI(UseARMGuardVarABI),
68 Use32BitVTableOffsetABI(false) { }
69
70 bool classifyReturnType(CGFunctionInfo &FI) const override;
71
72 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
73 // If C++ prohibits us from making a copy, pass by address.
74 if (!RD->canPassInRegisters())
75 return RAA_Indirect;
76 return RAA_Default;
77 }
78
79 bool isThisCompleteObject(GlobalDecl GD) const override {
80 // The Itanium ABI has separate complete-object vs. base-object
81 // variants of both constructors and destructors.
82 if (isa<CXXDestructorDecl>(GD.getDecl())) {
83 switch (GD.getDtorType()) {
84 case Dtor_Complete:
85 case Dtor_Deleting:
86 return true;
87
88 case Dtor_Base:
89 return false;
90
91 case Dtor_Comdat:
92 llvm_unreachable("emitting dtor comdat as function?");
93 }
94 llvm_unreachable("bad dtor kind");
95 }
96 if (isa<CXXConstructorDecl>(GD.getDecl())) {
97 switch (GD.getCtorType()) {
98 case Ctor_Complete:
99 return true;
100
101 case Ctor_Base:
102 return false;
103
104 case Ctor_CopyingClosure:
105 case Ctor_DefaultClosure:
106 llvm_unreachable("closure ctors in Itanium ABI?");
107
108 case Ctor_Comdat:
109 llvm_unreachable("emitting ctor comdat as function?");
110 }
111 llvm_unreachable("bad ctor kind");
112 }
113
114 // No other kinds.
115 return false;
116 }
117
118 bool isZeroInitializable(const MemberPointerType *MPT) override;
119
120 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
121
122 CGCallee
123 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
124 const Expr *E,
125 Address This,
126 llvm::Value *&ThisPtrForCall,
127 llvm::Value *MemFnPtr,
128 const MemberPointerType *MPT) override;
129
130 llvm::Value *
131 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
132 Address Base,
133 llvm::Value *MemPtr,
134 const MemberPointerType *MPT) override;
135
136 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
137 const CastExpr *E,
138 llvm::Value *Src) override;
139 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
140 llvm::Constant *Src) override;
141
142 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
143
144 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
145 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
146 CharUnits offset) override;
147 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
148 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
149 CharUnits ThisAdjustment);
150
151 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
152 llvm::Value *L, llvm::Value *R,
153 const MemberPointerType *MPT,
154 bool Inequality) override;
155
156 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
157 llvm::Value *Addr,
158 const MemberPointerType *MPT) override;
159
160 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
161 Address Ptr, QualType ElementType,
162 const CXXDestructorDecl *Dtor) override;
163
164 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
165 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
166
167 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
168
169 llvm::CallInst *
170 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
171 llvm::Value *Exn) override;
172
173 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
174 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
175 CatchTypeInfo
176 getAddrOfCXXCatchHandlerType(QualType Ty,
177 QualType CatchHandlerType) override {
178 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
179 }
180
181 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
182 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
183 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
184 Address ThisPtr,
185 llvm::Type *StdTypeInfoPtrTy) override;
186
187 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
188 QualType SrcRecordTy) override;
189
190 /// Determine whether we know that all instances of type RecordTy will have
191 /// the same vtable pointer value, distinct from the vtable pointers of all
192 /// other types. While this is required by the Itanium ABI, it doesn't happen in
193 /// practice in some cases due to language extensions.
194 bool hasUniqueVTablePointer(QualType RecordTy) {
195 const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();
196
197 // Under -fapple-kext, multiple definitions of the same vtable may be
198 // emitted.
199 if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
200 getContext().getLangOpts().AppleKext)
201 return false;
202
203 // If the type_info* would be null, the vtable might be merged with that of
204 // another type.
205 if (!CGM.shouldEmitRTTI())
206 return false;
207
208 // If there's only one definition of the vtable in the program, it has a
209 // unique address.
210 if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD)))
211 return true;
212
213 // Even if there are multiple definitions of the vtable, they are required
214 // by the ABI to use the same symbol name, so should be merged at load
215 // time. However, if the class has hidden visibility, there can be
216 // different versions of the class in different modules, and the ABI
217 // library might treat them as being the same.
218 if (CGM.GetLLVMVisibility(RD->getVisibility()) !=
219 llvm::GlobalValue::DefaultVisibility)
220 return false;
221
222 return true;
223 }
224
225 bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
226 return hasUniqueVTablePointer(DestRecordTy);
227 }
228
229 llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address ThisAddr,
230 QualType SrcRecordTy, QualType DestTy,
231 QualType DestRecordTy,
232 llvm::BasicBlock *CastEnd) override;
233
234 llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
235 QualType SrcRecordTy, QualType DestTy,
236 QualType DestRecordTy,
237 llvm::BasicBlock *CastSuccess,
238 llvm::BasicBlock *CastFail) override;
239
240 llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address ThisAddr,
241 QualType SrcRecordTy) override;
242
243 bool EmitBadCastCall(CodeGenFunction &CGF) override;
244
245 llvm::Value *
246 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
247 const CXXRecordDecl *ClassDecl,
248 const CXXRecordDecl *BaseClassDecl) override;
249
250 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
251
252 AddedStructorArgCounts
253 buildStructorSignature(GlobalDecl GD,
254 SmallVectorImpl<CanQualType> &ArgTys) override;
255
256 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
257 CXXDtorType DT) const override {
258 // Itanium does not emit any destructor variant as an inline thunk.
259 // Delegating may occur as an optimization, but all variants are either
260 // emitted with external linkage or as linkonce if they are inline and used.
261 return false;
262 }
263
264 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
265
266 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
267 FunctionArgList &Params) override;
268
269 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
270
271 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
272 const CXXConstructorDecl *D,
273 CXXCtorType Type,
274 bool ForVirtualBase,
275 bool Delegating) override;
276
277 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
278 const CXXDestructorDecl *DD,
279 CXXDtorType Type,
280 bool ForVirtualBase,
281 bool Delegating) override;
282
283 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
284 CXXDtorType Type, bool ForVirtualBase,
285 bool Delegating, Address This,
286 QualType ThisTy) override;
287
288 void emitVTableDefinitions(CodeGenVTables &CGVT,
289 const CXXRecordDecl *RD) override;
290
291 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
292 CodeGenFunction::VPtr Vptr) override;
293
294 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
295 return true;
296 }
297
298 llvm::Constant *
299 getVTableAddressPoint(BaseSubobject Base,
300 const CXXRecordDecl *VTableClass) override;
301
302 llvm::Value *getVTableAddressPointInStructor(
303 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
304 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
305
306 llvm::Value *getVTableAddressPointInStructorWithVTT(
307 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
308 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
309
310 llvm::Constant *
311 getVTableAddressPointForConstExpr(BaseSubobject Base,
312 const CXXRecordDecl *VTableClass) override;
313
314 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
315 CharUnits VPtrOffset) override;
316
317 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
318 Address This, llvm::Type *Ty,
319 SourceLocation Loc) override;
320
321 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
322 const CXXDestructorDecl *Dtor,
323 CXXDtorType DtorType, Address This,
324 DeleteOrMemberCallExpr E) override;
325
326 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
327
328 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
329 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
330
331 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
332 bool ReturnAdjustment) override {
333 // Allow inlining of thunks by emitting them with available_externally
334 // linkage together with vtables when needed.
335 if (ForVTable && !Thunk->hasLocalLinkage())
336 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
337 CGM.setGVProperties(Thunk, GD);
338 }
339
340 bool exportThunk() override { return true; }
341
342 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
343 const ThisAdjustment &TA) override;
344
345 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
346 const ReturnAdjustment &RA) override;
347
348 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
349 FunctionArgList &Args) const override {
350 assert(!Args.empty() && "expected the arglist to not be empty!");
351 return Args.size() - 1;
352 }
353
354 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
355 StringRef GetDeletedVirtualCallName() override
356 { return "__cxa_deleted_virtual"; }
357
358 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
359 Address InitializeArrayCookie(CodeGenFunction &CGF,
360 Address NewPtr,
361 llvm::Value *NumElements,
362 const CXXNewExpr *expr,
363 QualType ElementType) override;
364 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
365 Address allocPtr,
366 CharUnits cookieSize) override;
367
368 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
369 llvm::GlobalVariable *DeclPtr,
370 bool PerformInit) override;
371 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
372 llvm::FunctionCallee dtor,
373 llvm::Constant *addr) override;
374
375 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
376 llvm::Value *Val);
377 void EmitThreadLocalInitFuncs(
378 CodeGenModule &CGM,
379 ArrayRef<const VarDecl *> CXXThreadLocals,
380 ArrayRef<llvm::Function *> CXXThreadLocalInits,
381 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
382
383 bool usesThreadWrapperFunction(const VarDecl *VD) const override {
384 return !isEmittedWithConstantInitializer(VD) ||
385 mayNeedDestruction(VD);
386 }
387 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
388 QualType LValType) override;
389
390 bool NeedsVTTParameter(GlobalDecl GD) override;
391
392 /**************************** RTTI Uniqueness ******************************/
393
394protected:
395 /// Returns true if the ABI requires RTTI type_info objects to be unique
396 /// across a program.
397 virtual bool shouldRTTIBeUnique() const { return true; }
398
399public:
400 /// What sort of unique-RTTI behavior should we use?
401 enum RTTIUniquenessKind {
402 /// We are guaranteeing, or need to guarantee, that the RTTI string
403 /// is unique.
404 RUK_Unique,
405
406 /// We are not guaranteeing uniqueness for the RTTI string, so we
407 /// can demote to hidden visibility but must use string comparisons.
408 RUK_NonUniqueHidden,
409
410 /// We are not guaranteeing uniqueness for the RTTI string, so we
411 /// have to use string comparisons, but we also have to emit it with
412 /// non-hidden visibility.
413 RUK_NonUniqueVisible
414 };
415
416 /// Return the required visibility status for the given type and linkage in
417 /// the current ABI.
418 RTTIUniquenessKind
419 classifyRTTIUniqueness(QualType CanTy,
420 llvm::GlobalValue::LinkageTypes Linkage) const;
421 friend class ItaniumRTTIBuilder;
422
423 void emitCXXStructor(GlobalDecl GD) override;
424
425 std::pair<llvm::Value *, const CXXRecordDecl *>
426 LoadVTablePtr(CodeGenFunction &CGF, Address This,
427 const CXXRecordDecl *RD) override;
428
429 private:
430 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
431 const auto &VtableLayout =
432 CGM.getItaniumVTableContext().getVTableLayout(RD);
433
434 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
435 // Skip empty slot.
436 if (!VtableComponent.isUsedFunctionPointerKind())
437 continue;
438
439 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
440 if (!Method->getCanonicalDecl()->isInlined())
441 continue;
442
443 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
444 auto *Entry = CGM.GetGlobalValue(Name);
445 // This checks whether the virtual inline function has already been emitted.
446 // Note that it is possible that this inline function would be emitted
447 // after trying to emit vtable speculatively. Because of this we do
448 // an extra pass after emitting all deferred vtables to find and emit
449 // these vtables opportunistically.
450 if (!Entry || Entry->isDeclaration())
451 return true;
452 }
453 return false;
454 }
455
456 bool isVTableHidden(const CXXRecordDecl *RD) const {
457 const auto &VtableLayout =
458 CGM.getItaniumVTableContext().getVTableLayout(RD);
459
460 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
461 if (VtableComponent.isRTTIKind()) {
462 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
463 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
464 return true;
465 } else if (VtableComponent.isUsedFunctionPointerKind()) {
466 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
467 if (Method->getVisibility() == Visibility::HiddenVisibility &&
468 !Method->isDefined())
469 return true;
470 }
471 }
472 return false;
473 }
474};
475
476class ARMCXXABI : public ItaniumCXXABI {
477public:
478 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
479 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
480 /*UseARMGuardVarABI=*/true) {}
481
482 bool constructorsAndDestructorsReturnThis() const override { return true; }
483
484 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
485 QualType ResTy) override;
486
487 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
488 Address InitializeArrayCookie(CodeGenFunction &CGF,
489 Address NewPtr,
490 llvm::Value *NumElements,
491 const CXXNewExpr *expr,
492 QualType ElementType) override;
493 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
494 CharUnits cookieSize) override;
495};
496
497class AppleARM64CXXABI : public ARMCXXABI {
498public:
499 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
500 Use32BitVTableOffsetABI = true;
501 }
502
503 // ARM64 libraries are prepared for non-unique RTTI.
504 bool shouldRTTIBeUnique() const override { return false; }
505};
506
507class FuchsiaCXXABI final : public ItaniumCXXABI {
508public:
509 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
510 : ItaniumCXXABI(CGM) {}
511
512private:
513 bool constructorsAndDestructorsReturnThis() const override { return true; }
514};
515
516class WebAssemblyCXXABI final : public ItaniumCXXABI {
517public:
518 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
519 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
520 /*UseARMGuardVarABI=*/true) {}
521 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
522 llvm::CallInst *
523 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
524 llvm::Value *Exn) override;
525
526private:
527 bool constructorsAndDestructorsReturnThis() const override { return true; }
528 bool canCallMismatchedFunctionType() const override { return false; }
529};
530
531class XLCXXABI final : public ItaniumCXXABI {
532public:
533 explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
534 : ItaniumCXXABI(CGM) {}
535
536 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
537 llvm::FunctionCallee dtor,
538 llvm::Constant *addr) override;
539
540 bool useSinitAndSterm() const override { return true; }
541
542private:
543 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
544 llvm::Constant *addr);
545};
546}
547
548CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
549 switch (CGM.getContext().getCXXABIKind()) {
550 // For IR-generation purposes, there's no significant difference
551 // between the ARM and iOS ABIs.
552 case TargetCXXABI::GenericARM:
553 case TargetCXXABI::iOS:
554 case TargetCXXABI::WatchOS:
555 return new ARMCXXABI(CGM);
556
557 case TargetCXXABI::AppleARM64:
558 return new AppleARM64CXXABI(CGM);
559
560 case TargetCXXABI::Fuchsia:
561 return new FuchsiaCXXABI(CGM);
562
563 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
564 // include the other 32-bit ARM oddities: constructor/destructor return values
565 // and array cookies.
566 case TargetCXXABI::GenericAArch64:
567 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
568 /*UseARMGuardVarABI=*/true);
569
570 case TargetCXXABI::GenericMIPS:
571 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
572
573 case TargetCXXABI::WebAssembly:
574 return new WebAssemblyCXXABI(CGM);
575
576 case TargetCXXABI::XL:
577 return new XLCXXABI(CGM);
578
579 case TargetCXXABI::GenericItanium:
580 if (CGM.getContext().getTargetInfo().getTriple().getArch()
581 == llvm::Triple::le32) {
582 // For PNaCl, use ARM-style method pointers so that PNaCl code
583 // does not assume anything about the alignment of function
584 // pointers.
585 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
586 }
587 return new ItaniumCXXABI(CGM);
588
589 case TargetCXXABI::Microsoft:
590 llvm_unreachable("Microsoft ABI is not Itanium-based");
591 }
592 llvm_unreachable("bad ABI kind");
593}
594
595llvm::Type *
596ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
597 if (MPT->isMemberDataPointer())
598 return CGM.PtrDiffTy;
599 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
600}
601
602/// In the Itanium and ARM ABIs, method pointers have the form:
603/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
604///
605/// In the Itanium ABI:
606/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
607/// - the this-adjustment is (memptr.adj)
608/// - the virtual offset is (memptr.ptr - 1)
609///
610/// In the ARM ABI:
611/// - method pointers are virtual if (memptr.adj & 1) is nonzero
612/// - the this-adjustment is (memptr.adj >> 1)
613/// - the virtual offset is (memptr.ptr)
614/// ARM uses 'adj' for the virtual flag because Thumb functions
615/// may be only single-byte aligned.
616///
617/// If the member is virtual, the adjusted 'this' pointer points
618/// to a vtable pointer from which the virtual offset is applied.
619///
620/// If the member is non-virtual, memptr.ptr is the address of
621/// the function to call.
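///
/// Editor's note: an illustrative sketch of the encodings, assuming a
/// hypothetical class S and an 8-byte pointer width (not from the original
/// source):
/// \code
///   struct S { void f(); virtual void g(); virtual void h(); };
///   // &S::f (non-virtual):        Itanium {ptr = &S::f, adj = 0}
///   //                             ARM     {ptr = &S::f, adj = 2*0 = 0}
///   // &S::h (virtual, vtable slot 1, byte offset 8):
///   //                             Itanium {ptr = 8 + 1 = 9, adj = 0}
///   //                             ARM     {ptr = 8, adj = 2*0 + 1 = 1}
/// \endcode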
622CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
623 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
624 llvm::Value *&ThisPtrForCall,
625 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
626 CGBuilderTy &Builder = CGF.Builder;
627
628 const FunctionProtoType *FPT =
629 MPT->getPointeeType()->castAs<FunctionProtoType>();
630 auto *RD =
631 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
632
633 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
634
635 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
636 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
637 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
638
639 // Extract memptr.adj, which is in the second field.
640 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
641
642 // Compute the true adjustment.
643 llvm::Value *Adj = RawAdj;
644 if (UseARMMethodPtrABI)
645 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
646
647 // Apply the adjustment and cast back to the original struct type
648 // for consistency.
649 llvm::Value *This = ThisAddr.getPointer();
650 This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj);
651 ThisPtrForCall = This;
652
653 // Load the function pointer.
654 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
655
656 // If the LSB in the function pointer is 1, the function pointer points to
657 // a virtual function.
658 llvm::Value *IsVirtual;
659 if (UseARMMethodPtrABI)
660 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
661 else
662 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
663 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
664 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
665
666 // In the virtual path, the adjustment left 'This' pointing to the
667 // vtable of the correct base subobject. The "function pointer" is an
668 // offset within the vtable (+1 for the virtual flag on non-ARM).
669 CGF.EmitBlock(FnVirtual);
670
671 // Cast the adjusted this to a pointer to vtable pointer and load.
672 llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
673 CharUnits VTablePtrAlign =
674 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
675 CGF.getPointerAlign());
676 llvm::Value *VTable = CGF.GetVTablePtr(
677 Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
678
679 // Apply the offset.
680 // On ARM64, to reserve extra space in virtual member function pointers,
681 // we only pay attention to the low 32 bits of the offset.
682 llvm::Value *VTableOffset = FnAsInt;
683 if (!UseARMMethodPtrABI)
684 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
685 if (Use32BitVTableOffsetABI) {
686 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
687 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
688 }
689
690 // Check the address of the function pointer if CFI on member function
691 // pointers is enabled.
692 llvm::Constant *CheckSourceLocation;
693 llvm::Constant *CheckTypeDesc;
694 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
695 CGM.HasHiddenLTOVisibility(RD);
696 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
697 CGM.HasHiddenLTOVisibility(RD);
698 bool ShouldEmitWPDInfo =
699 CGM.getCodeGenOpts().WholeProgramVTables &&
700 // Don't insert type tests if we are forcing public visibility.
701 !CGM.AlwaysHasLTOVisibilityPublic(RD);
702 llvm::Value *VirtualFn = nullptr;
703
704 {
705 CodeGenFunction::SanitizerScope SanScope(&CGF);
706 llvm::Value *TypeId = nullptr;
707 llvm::Value *CheckResult = nullptr;
708
709 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
710 // If doing CFI, VFE or WPD, we will need the metadata node to check
711 // against.
712 llvm::Metadata *MD =
713 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
714 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
715 }
716
717 if (ShouldEmitVFEInfo) {
718 llvm::Value *VFPAddr =
719 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
720
721 // If doing VFE, load from the vtable with a type.checked.load intrinsic
722 // call. Note that we use the GEP to calculate the address to load from
723 // and pass 0 as the offset to the intrinsic. This is because every
724 // vtable slot of the correct type is marked with matching metadata, and
725 // we know that the load must be from one of these slots.
726 llvm::Value *CheckedLoad = Builder.CreateCall(
727 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
728 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
729 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
730 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
731 } else {
732 // When not doing VFE, emit a normal load, as it allows more
733 // optimisations than type.checked.load.
734 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
735 llvm::Value *VFPAddr =
736 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
737 llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
738 ? llvm::Intrinsic::type_test
739 : llvm::Intrinsic::public_type_test;
740
741 CheckResult =
742 Builder.CreateCall(CGM.getIntrinsic(IID), {VFPAddr, TypeId});
743 }
744
745 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
746 VirtualFn = CGF.Builder.CreateCall(
747 CGM.getIntrinsic(llvm::Intrinsic::load_relative,
748 {VTableOffset->getType()}),
749 {VTable, VTableOffset});
750 } else {
751 llvm::Value *VFPAddr =
752 CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
753 VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
754 CGF.getPointerAlign(),
755 "memptr.virtualfn");
756 }
757 }
758 assert(VirtualFn && "Virtual function pointer not created!");
759 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
760 CheckResult) &&
761 "Check result required but not created!");
762
763 if (ShouldEmitCFICheck) {
764 // If doing CFI, emit the check.
765 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
766 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
767 llvm::Constant *StaticData[] = {
768 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
769 CheckSourceLocation,
770 CheckTypeDesc,
771 };
772
773 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
774 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
775 } else {
776 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
777 CGM.getLLVMContext(),
778 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
779 llvm::Value *ValidVtable = Builder.CreateCall(
780 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
781 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
782 SanitizerHandler::CFICheckFail, StaticData,
783 {VTable, ValidVtable});
784 }
785
786 FnVirtual = Builder.GetInsertBlock();
787 }
788 } // End of sanitizer scope
789
790 CGF.EmitBranch(FnEnd);
791
792 // In the non-virtual path, memptr.ptr already holds the address of the
793 // function to call.
794 CGF.EmitBlock(FnNonVirtual);
795 llvm::Value *NonVirtualFn =
796 Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
797
798 // Check the function pointer if CFI on member function pointers is enabled.
799 if (ShouldEmitCFICheck) {
800 CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
801 if (RD->hasDefinition()) {
802 CodeGenFunction::SanitizerScope SanScope(&CGF);
803
804 llvm::Constant *StaticData[] = {
805 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
806 CheckSourceLocation,
807 CheckTypeDesc,
808 };
809
810 llvm::Value *Bit = Builder.getFalse();
811 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
812 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
813 getContext().getMemberPointerType(
814 MPT->getPointeeType(),
815 getContext().getRecordType(Base).getTypePtr()));
816 llvm::Value *TypeId =
817 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
818
819 llvm::Value *TypeTest =
820 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
821 {NonVirtualFn, TypeId});
822 Bit = Builder.CreateOr(Bit, TypeTest);
823 }
824
825 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
826 SanitizerHandler::CFICheckFail, StaticData,
827 {NonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
828
829 FnNonVirtual = Builder.GetInsertBlock();
830 }
831 }
832
833 // We're done.
834 CGF.EmitBlock(FnEnd);
835 llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
836 CalleePtr->addIncoming(VirtualFn, FnVirtual);
837 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
838
839 CGCallee Callee(FPT, CalleePtr);
840 return Callee;
841}
842
843/// Compute an l-value by applying the given pointer-to-member to a
844/// base object.
845llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
846 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
847 const MemberPointerType *MPT) {
848 assert(MemPtr->getType() == CGM.PtrDiffTy);
849
850 CGBuilderTy &Builder = CGF.Builder;
851
852 // Apply the offset, which we assume is non-null.
853 return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.getPointer(), MemPtr,
854 "memptr.offset");
855}
856
857/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
858/// conversion.
859///
860/// Bitcast conversions are always a no-op under Itanium.
861///
862/// Obligatory offset/adjustment diagram:
863/// <-- offset --> <-- adjustment -->
864/// |--------------------------|----------------------|--------------------|
865/// ^Derived address point ^Base address point ^Member address point
866///
867/// So when converting a base member pointer to a derived member pointer,
868/// we add the offset to the adjustment because the address point has
869/// decreased; and conversely, when converting a derived MP to a base MP
870/// we subtract the offset from the adjustment because the address point
871/// has increased.
872///
873/// The standard forbids (at compile time) conversion to and from
874/// virtual bases, which is why we don't have to consider them here.
875///
876/// The standard forbids (at run time) casting a derived MP to a base
877/// MP when the derived MP does not point to a member of the base.
878/// This is why -1 is a reasonable choice for null data member
879/// pointers.
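///
/// Editor's note: a small illustrative sketch (hypothetical types, typical
/// 64-bit layout assumed; not from the original source):
/// \code
///   struct Pad  { void *p; };          // 8 bytes
///   struct Base { int a, b; };         // Base::b at offset 4
///   struct Derived : Pad, Base {};     // Base subobject at offset 8
///   // int Base::* (&Base::b, offset 4)  -> int Derived::*  : 4 + 8 = 12
///   // A null data member pointer (-1) stays -1 across the conversion.
///   // For member function pointers only 'adj' changes: adj += 8 under
///   // Itanium, and adj += 2*8 under ARM, where adj is stored doubled.
/// \endcode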
880llvm::Value *
881ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
882 const CastExpr *E,
883 llvm::Value *src) {
884 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
885 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
886 E->getCastKind() == CK_ReinterpretMemberPointer);
887
888 // Under Itanium, reinterprets don't require any additional processing.
889 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
890
891 // Use constant emission if we can.
892 if (isa<llvm::Constant>(src))
893 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
894
895 llvm::Constant *adj = getMemberPointerAdjustment(E);
896 if (!adj) return src;
897
898 CGBuilderTy &Builder = CGF.Builder;
899 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
900
901 const MemberPointerType *destTy =
902 E->getType()->castAs<MemberPointerType>();
903
904 // For member data pointers, this is just a matter of adding the
905 // offset if the source is non-null.
906 if (destTy->isMemberDataPointer()) {
907 llvm::Value *dst;
908 if (isDerivedToBase)
909 dst = Builder.CreateNSWSub(src, adj, "adj");
910 else
911 dst = Builder.CreateNSWAdd(src, adj, "adj");
912
913 // Null check.
914 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
915 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
916 return Builder.CreateSelect(isNull, src, dst);
917 }
918
919 // The this-adjustment is left-shifted by 1 on ARM.
920 if (UseARMMethodPtrABI) {
921 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
922 offset <<= 1;
923 adj = llvm::ConstantInt::get(adj->getType(), offset);
924 }
925
926 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
927 llvm::Value *dstAdj;
928 if (isDerivedToBase)
929 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
930 else
931 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
932
933 return Builder.CreateInsertValue(src, dstAdj, 1);
934}
935
936llvm::Constant *
937ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
938 llvm::Constant *src) {
939 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
940 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
941 E->getCastKind() == CK_ReinterpretMemberPointer);
942
943 // Under Itanium, reinterprets don't require any additional processing.
944 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
945
946 // If the adjustment is trivial, we don't need to do anything.
947 llvm::Constant *adj = getMemberPointerAdjustment(E);
948 if (!adj) return src;
949
950 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
951
952 const MemberPointerType *destTy =
953 E->getType()->castAs<MemberPointerType>();
954
955 // For member data pointers, this is just a matter of adding the
956 // offset if the source is non-null.
957 if (destTy->isMemberDataPointer()) {
958 // null maps to null.
959 if (src->isAllOnesValue()) return src;
960
961 if (isDerivedToBase)
962 return llvm::ConstantExpr::getNSWSub(src, adj);
963 else
964 return llvm::ConstantExpr::getNSWAdd(src, adj);
965 }
966
967 // The this-adjustment is left-shifted by 1 on ARM.
968 if (UseARMMethodPtrABI) {
969 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
970 offset <<= 1;
971 adj = llvm::ConstantInt::get(adj->getType(), offset);
972 }
973
974 llvm::Constant *srcAdj = src->getAggregateElement(1);
975 llvm::Constant *dstAdj;
976 if (isDerivedToBase)
977 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
978 else
979 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
980
981 llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
982 assert(res != nullptr && "Folding must succeed");
983 return res;
984}
985
986llvm::Constant *
987ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
988 // Itanium C++ ABI 2.3:
989 // A NULL pointer is represented as -1.
990 if (MPT->isMemberDataPointer())
991 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
992
993 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
994 llvm::Constant *Values[2] = { Zero, Zero };
995 return llvm::ConstantStruct::getAnon(Values);
996}
997
998llvm::Constant *
999ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
1000 CharUnits offset) {
1001 // Itanium C++ ABI 2.3:
1002 // A pointer to data member is an offset from the base address of
1003 // the class object containing it, represented as a ptrdiff_t
1004 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
1005}
1006
1007llvm::Constant *
1008ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
1009 return BuildMemberPointer(MD, CharUnits::Zero());
1010}
1011
1012llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
1013 CharUnits ThisAdjustment) {
1014 assert(MD->isInstance() && "Member function must not be static!");
1015
1016 CodeGenTypes &Types = CGM.getTypes();
1017
1018 // Get the function pointer (or index if this is a virtual function).
1019 llvm::Constant *MemPtr[2];
1020 if (MD->isVirtual()) {
1021 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1022 uint64_t VTableOffset;
1023 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1024 // Multiply by 4-byte relative offsets.
1025 VTableOffset = Index * 4;
1026 } else {
1027 const ASTContext &Context = getContext();
1028 CharUnits PointerWidth = Context.toCharUnitsFromBits(
1029 Context.getTargetInfo().getPointerWidth(LangAS::Default));
1030 VTableOffset = Index * PointerWidth.getQuantity();
1031 }
1032
1033 if (UseARMMethodPtrABI) {
1034 // ARM C++ ABI 3.2.1:
1035 // This ABI specifies that adj contains twice the this
1036 // adjustment, plus 1 if the member function is virtual. The
1037 // least significant bit of adj then makes exactly the same
1038 // discrimination as the least significant bit of ptr does for
1039 // Itanium.
1040 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1041 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1042 2 * ThisAdjustment.getQuantity() + 1);
1043 } else {
1044 // Itanium C++ ABI 2.3:
1045 // For a virtual function, [the pointer field] is 1 plus the
1046 // virtual table offset (in bytes) of the function,
1047 // represented as a ptrdiff_t.
1048 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1049 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1050 ThisAdjustment.getQuantity());
1051 }
1052 } else {
1053 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1054 llvm::Type *Ty;
1055 // Check whether the function has a computable LLVM signature.
1056 if (Types.isFuncTypeConvertible(FPT)) {
1057 // The function has a computable LLVM signature; use the correct type.
1058 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1059 } else {
1060 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1061 // function type is incomplete.
1062 Ty = CGM.PtrDiffTy;
1063 }
1064 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
1065
1066 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1067 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1068 (UseARMMethodPtrABI ? 2 : 1) *
1069 ThisAdjustment.getQuantity());
1070 }
1071
1072 return llvm::ConstantStruct::getAnon(MemPtr);
1073}
1074
1075llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1076 QualType MPType) {
1077 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1078 const ValueDecl *MPD = MP.getMemberPointerDecl();
1079 if (!MPD)
1080 return EmitNullMemberPointer(MPT);
1081
1082 CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1083
1084 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1085 return BuildMemberPointer(MD, ThisAdjustment);
1086
1087 CharUnits FieldOffset =
1088 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1089 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1090}
1091
1092/// The comparison algorithm is pretty easy: the member pointers are
1093/// the same if they're either bitwise identical *or* both null.
1094///
1095/// ARM is different here only because null-ness is more complicated.
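///
/// Editor's note: illustrative example (hypothetical types, not from the
/// original source):
/// \code
///   struct A { virtual void f(); };
///   struct B { virtual void g(); };
///   struct D : A, B {};
///   void (B::*pb)() = nullptr;                 // Itanium: {ptr = 0, adj = 0}
///   auto pd = static_cast<void (D::*)()>(pb);  // adj picks up B's offset in D
///   bool eq = (pd == nullptr);                 // true: adj is only compared
///                                              // when ptr is nonzero
/// \endcode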
1096llvm::Value *
1097ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
1098 llvm::Value *L,
1099 llvm::Value *R,
1100 const MemberPointerType *MPT,
1101 bool Inequality) {
1102 CGBuilderTy &Builder = CGF.Builder;
1103
1104 llvm::ICmpInst::Predicate Eq;
1105 llvm::Instruction::BinaryOps And, Or;
1106 if (Inequality) {
1107 Eq = llvm::ICmpInst::ICMP_NE;
1108 And = llvm::Instruction::Or;
1109 Or = llvm::Instruction::And;
1110 } else {
1111 Eq = llvm::ICmpInst::ICMP_EQ;
1112 And = llvm::Instruction::And;
1113 Or = llvm::Instruction::Or;
1114 }
1115
1116 // Member data pointers are easy because there's a unique null
1117 // value, so it just comes down to bitwise equality.
1118 if (MPT->isMemberDataPointer())
1119 return Builder.CreateICmp(Eq, L, R);
1120
1121 // For member function pointers, the tautologies are more complex.
1122 // The Itanium tautology is:
1123 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1124 // The ARM tautology is:
1125 // (L == R) <==> (L.ptr == R.ptr &&
1126 // (L.adj == R.adj ||
1127 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1128 // The inequality tautologies have exactly the same structure, except
1129 // applying De Morgan's laws.
1130
1131 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1132 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1133
1134 // This condition tests whether L.ptr == R.ptr. This must always be
1135 // true for equality to hold.
1136 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1137
1138 // This condition, together with the assumption that L.ptr == R.ptr,
1139 // tests whether the pointers are both null. ARM imposes an extra
1140 // condition.
1141 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1142 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1143
1144 // This condition tests whether L.adj == R.adj. If this isn't
1145 // true, the pointers are unequal unless they're both null.
1146 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1147 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1148 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1149
1150 // Null member function pointers on ARM clear the low bit of Adj,
1151 // so the zero condition has to check that neither low bit is set.
1152 if (UseARMMethodPtrABI) {
1153 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1154
1155 // Compute (l.adj | r.adj) & 1 and test it against zero.
1156 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1157 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1158 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1159 "cmp.or.adj");
1160 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1161 }
1162
1163 // Tie together all our conditions.
1164 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1165 Result = Builder.CreateBinOp(And, PtrEq, Result,
1166 Inequality ? "memptr.ne" : "memptr.eq");
1167 return Result;
1168}
1169
1170llvm::Value *
1171ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1172 llvm::Value *MemPtr,
1173 const MemberPointerType *MPT) {
1174 CGBuilderTy &Builder = CGF.Builder;
1175
1176 /// For member data pointers, this is just a check against -1.
1177 if (MPT->isMemberDataPointer()) {
1178 assert(MemPtr->getType() == CGM.PtrDiffTy);
1179 llvm::Value *NegativeOne =
1180 llvm::Constant::getAllOnesValue(MemPtr->getType());
1181 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1182 }
1183
1184 // In Itanium, a member function pointer is not null if 'ptr' is not null.
1185 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1186
1187 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1188 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1189
1190 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1191 // (the virtual bit) is set.
1192 if (UseARMMethodPtrABI) {
1193 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1194 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1195 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1196 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1197 "memptr.isvirtual");
1198 Result = Builder.CreateOr(Result, IsVirtual);
1199 }
1200
1201 return Result;
1202}
1203
1204bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1205 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1206 if (!RD)
1207 return false;
1208
1209 // If C++ prohibits us from making a copy, return by address.
1210 if (!RD->canPassInRegisters()) {
1211 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1212 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1213 return true;
1214 }
1215 return false;
1216}
1217
1218/// The Itanium ABI requires non-zero initialization only for data
1219/// member pointers, for which '0' is a valid offset.
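///
/// Editor's note, for illustration (hypothetical type, not from the original
/// source):
/// \code
///   struct S { int first; };
///   int S::*pnull  = nullptr;    // encoded as the offset -1
///   int S::*pfirst = &S::first;  // encoded as the offset 0, a valid value
/// \endcode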
1220bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1221 return MPT->isMemberFunctionPointer();
1222}
1223
1224/// The Itanium ABI always places an offset to the complete object
1225/// at entry -2 in the vtable.
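///
/// Editor's note: in the conventional (non-relative) layout the words just
/// below the address point look like this (illustrative sketch only):
/// \code
///   // vtable[-2]  offset-to-top   (ptrdiff_t; added to `this` to reach the
///   //                              complete object)
///   // vtable[-1]  RTTI pointer    (const std::type_info *)
///   // vtable[ 0]  first virtual function slot; the object's vptr points here
/// \endcode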
1226void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1227 const CXXDeleteExpr *DE,
1228 Address Ptr,
1229 QualType ElementType,
1230 const CXXDestructorDecl *Dtor) {
1231 bool UseGlobalDelete = DE->isGlobalDelete();
1232 if (UseGlobalDelete) {
1233 // Derive the complete-object pointer, which is what we need
1234 // to pass to the deallocation function.
1235
1236 // Grab the vtable pointer as an intptr_t*.
1237 auto *ClassDecl =
1238 cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1239 llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);
1240
1241 // Track back to entry -2 and pull out the offset there.
1242 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1243 CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
1244 llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,
1245 CGF.getPointerAlign());
1246
1247 // Apply the offset.
1248 llvm::Value *CompletePtr = Ptr.getPointer();
1249 CompletePtr =
1250 CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
1251
1252 // If we're supposed to call the global delete, make sure we do so
1253 // even if the destructor throws.
1254 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1255 ElementType);
1256 }
1257
1258 // FIXME: Provide a source location here even though there's no
1259 // CXXMemberCallExpr for dtor call.
1260 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1261 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1262
1263 if (UseGlobalDelete)
1264 CGF.PopCleanupBlock();
1265}
1266
1267void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1268 // void __cxa_rethrow();
1269
1270 llvm::FunctionType *FTy =
1271 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1272
1273 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1274
1275 if (isNoReturn)
1276 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, std::nullopt);
1277 else
1278 CGF.EmitRuntimeCallOrInvoke(Fn);
1279}
1280
1281static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1282 // void *__cxa_allocate_exception(size_t thrown_size);
1283
1284 llvm::FunctionType *FTy =
1285 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1286
1287 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1288}
1289
1290static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1291 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1292 // void (*dest) (void *));
1293
1294 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
1295 llvm::FunctionType *FTy =
1296 llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1297
1298 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1299}
1300
1301void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1302 QualType ThrowType = E->getSubExpr()->getType();
1303 // Now allocate the exception object.
1304 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1305 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1306
1307 llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1308 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1309 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1310
1311 CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1312 CGF.EmitAnyExprToExn(
1313 E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
1314
1315 // Now throw the exception.
1316 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1317 /*ForEH=*/true);
1318
1319 // The address of the destructor. If the exception type has a
1320 // trivial destructor (or isn't a record), we just pass null.
1321 llvm::Constant *Dtor = nullptr;
1322 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1323 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1324 if (!Record->hasTrivialDestructor()) {
1325 CXXDestructorDecl *DtorD = Record->getDestructor();
1326 Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1327 Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1328 }
1329 }
1330 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1331
1332 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1333 CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1334}
1335
1336static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1337 // void *__dynamic_cast(const void *sub,
1338 // global_as const abi::__class_type_info *src,
1339 // global_as const abi::__class_type_info *dst,
1340 // std::ptrdiff_t src2dst_offset);
1341
1342 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1343 llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy;
1344 llvm::Type *PtrDiffTy =
1345 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1346
1347 llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy };
1348
1349 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1350
1351 // Mark the function as nounwind readonly.
1352 llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
1353 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1354 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
1355 llvm::AttributeList Attrs = llvm::AttributeList::get(
1356 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1357
1358 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1359}
1360
1361static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1362 // void __cxa_bad_cast();
1363 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1364 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1365}
1366
1367/// Compute the src2dst_offset hint as described in the
1368/// Itanium C++ ABI [2.9.7]
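///
/// Editor's note: the hint is the static offset of Src within Dst when that
/// offset is unique, or a negative code otherwise. An illustrative sketch
/// (hypothetical classes, typical 64-bit layout with 8-byte vptrs assumed):
/// \code
///   struct A { virtual ~A(); };
///   struct B : A {};          // hint(Src=A, Dst=B) ==  0
///   struct C { virtual ~C(); };
///   struct D : C, A {};       // hint(Src=A, Dst=D) ==  8   (A placed after C)
///   struct E : B, D {};       // hint(Src=A, Dst=E) == -3   (two public A bases)
///   // -1: some path goes through a virtual base; -2: Src is not a public
///   // base of Dst at all.
/// \endcode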
1369static CharUnits computeOffsetHint(ASTContext &Context,
1370 const CXXRecordDecl *Src,
1371 const CXXRecordDecl *Dst) {
1372 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1373 /*DetectVirtual=*/false);
1374
1375 // If Dst is not derived from Src we can skip the whole computation below and
1376 // return that Src is not a public base of Dst. Record all inheritance paths.
1377 if (!Dst->isDerivedFrom(Src, Paths))
1378 return CharUnits::fromQuantity(-2ULL);
1379
1380 unsigned NumPublicPaths = 0;
1381 CharUnits Offset;
1382
1383 // Now walk all possible inheritance paths.
1384 for (const CXXBasePath &Path : Paths) {
1385 if (Path.Access != AS_public) // Ignore non-public inheritance.
1386 continue;
1387
1388 ++NumPublicPaths;
1389
1390 for (const CXXBasePathElement &PathElement : Path) {
1391 // If the path contains a virtual base class we can't give any hint.
1392 // -1: no hint.
1393 if (PathElement.Base->isVirtual())
1394 return CharUnits::fromQuantity(-1ULL);
1395
1396 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1397 continue;
1398
1399 // Accumulate the base class offsets.
1400 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1401 Offset += L.getBaseClassOffset(
1402 PathElement.Base->getType()->getAsCXXRecordDecl());
1403 }
1404 }
1405
1406 // -2: Src is not a public base of Dst.
1407 if (NumPublicPaths == 0)
1408 return CharUnits::fromQuantity(-2ULL);
1409
1410 // -3: Src is a multiple public base type but never a virtual base type.
1411 if (NumPublicPaths > 1)
1412 return CharUnits::fromQuantity(-3ULL);
1413
1414 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1415 // Return the offset of Src from the origin of Dst.
1416 return Offset;
1417}
1418
1419static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1420 // void __cxa_bad_typeid();
1421 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1422
1423 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1424}
1425
1426bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1427 QualType SrcRecordTy) {
1428 return IsDeref;
1429}
1430
1431void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1432 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1433 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1434 Call->setDoesNotReturn();
1435 CGF.Builder.CreateUnreachable();
1436}
1437
1438llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1439 QualType SrcRecordTy,
1440 Address ThisPtr,
1441 llvm::Type *StdTypeInfoPtrTy) {
1442 auto *ClassDecl =
1443 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1444 llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy,
1445 ClassDecl);
1446
1447 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1448 // Load the type info.
1449 Value = CGF.Builder.CreateCall(
1450 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1451 {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
1452 } else {
1453 // Load the type info.
1454 Value =
1455 CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
1456 }
1457 return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
1458 CGF.getPointerAlign());
1459}
1460
1461bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1462 QualType SrcRecordTy) {
1463 return SrcIsPtr;
1464}
1465
1466llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
1467 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1468 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1469 llvm::Type *PtrDiffLTy =
1470 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1471
1472 llvm::Value *SrcRTTI =
1473 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1474 llvm::Value *DestRTTI =
1475 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1476
1477 // Compute the offset hint.
1478 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1479 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1480 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1481 PtrDiffLTy,
1482 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1483
1484 // Emit the call to __dynamic_cast.
1485 llvm::Value *Args[] = {ThisAddr.getPointer(), SrcRTTI, DestRTTI, OffsetHint};
1486 llvm::Value *Value =
1487 CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), Args);
1488
1489 /// C++ [expr.dynamic.cast]p9:
1490 /// A failed cast to reference type throws std::bad_cast
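/// Editor's note, for illustration: `Derived &d = dynamic_cast<Derived &>(b);`
/// must throw std::bad_cast when `b` does not refer to a Derived object,
/// whereas the pointer form simply evaluates to a null pointer.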
1491 if (DestTy->isReferenceType()) {
1492 llvm::BasicBlock *BadCastBlock =
1493 CGF.createBasicBlock("dynamic_cast.bad_cast");
1494
1495 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1496 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1497
1498 CGF.EmitBlock(BadCastBlock);
1499 EmitBadCastCall(CGF);
1500 }
1501
1502 return Value;
1503}
1504
1505llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
1506 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1507 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
1508 llvm::BasicBlock *CastFail) {
1509 ASTContext &Context = getContext();
1510
1511 // Find all the inheritance paths.
1512 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1513 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1514 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1515 /*DetectVirtual=*/false);
1516 (void)DestDecl->isDerivedFrom(SrcDecl, Paths);
1517
1518 // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
1519 // might appear.
1520 std::optional<CharUnits> Offset;
1521 for (const CXXBasePath &Path : Paths) {
1522 // dynamic_cast only finds public inheritance paths.
1523 if (Path.Access != AS_public)
1524 continue;
1525
1526 CharUnits PathOffset;
1527 for (const CXXBasePathElement &PathElement : Path) {
1528 // Find the offset along this inheritance step.
1529 const CXXRecordDecl *Base =
1530 PathElement.Base->getType()->getAsCXXRecordDecl();
1531 if (PathElement.Base->isVirtual()) {
1532 // For a virtual base class, we know that the derived class is exactly
1533 // DestDecl, so we can use the vbase offset from its layout.
1534 const ASTRecordLayout &L = Context.getASTRecordLayout(DestDecl);
1535 PathOffset = L.getVBaseClassOffset(Base);
1536 } else {
1537 const ASTRecordLayout &L =
1538 Context.getASTRecordLayout(PathElement.Class);
1539 PathOffset += L.getBaseClassOffset(Base);
1540 }
1541 }
1542
1543 if (!Offset)
1544 Offset = PathOffset;
1545 else if (Offset != PathOffset) {
1546 // Base appears in at least two different places. Find the most-derived
1547 // object and see if it's a DestDecl. Note that the most-derived object
1548 // must be at least as aligned as this base class subobject, and must
1549 // have a vptr at offset 0.
1550 ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy),
1551 CGF.VoidPtrTy, ThisAddr.getAlignment());
1552 SrcDecl = DestDecl;
1553 Offset = CharUnits::Zero();
1554 break;
1555 }
1556 }
1557
1558 if (!Offset) {
1559 // If there are no public inheritance paths, the cast always fails.
1560 CGF.EmitBranch(CastFail);
1561 return llvm::PoisonValue::get(CGF.VoidPtrTy);
1562 }
1563
1564 // Compare the vptr against the expected vptr for the destination type at
1565 // this offset. Note that we do not know what type ThisAddr points to in
1566 // the case where the derived class multiply inherits from the base class
1567 // so we can't use GetVTablePtr, so we load the vptr directly instead.
1568 llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
1569 ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable");
1570 CGM.DecorateInstructionWithTBAA(
1571 VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
1572 llvm::Value *Success = CGF.Builder.CreateICmpEQ(
1573 VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
1574 llvm::Value *Result = ThisAddr.getPointer();
1575 if (!Offset->isZero())
1576 Result = CGF.Builder.CreateInBoundsGEP(
1577 CGF.CharTy, Result,
1578 {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())});
1579 CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail);
1580 return Result;
1581}
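// Sketch of the result for `dynamic_cast<Dest *>(p)` when Dest has no derived
// classes: the code above conceptually produces
//   %vptr = load ptr, ptr %p
//   %ok   = icmp eq ptr %vptr, <address point of Dest's vtable at Offset>
//   br i1 %ok, label %cast.success, label %cast.fail
// with the returned pointer biased by -Offset whenever the unique public path
// from Dest to Src sits at a non-zero offset.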
1582
1583llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
1584 Address ThisAddr,
1585 QualType SrcRecordTy) {
1586 auto *ClassDecl =
1587 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1588 llvm::Value *OffsetToTop;
1589 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1590 // Get the vtable pointer.
1591 llvm::Value *VTable =
1592 CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
1593
1594 // Get the offset-to-top from the vtable.
1595 OffsetToTop =
1596 CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
1597 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1598 CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1599 } else {
1600 llvm::Type *PtrDiffLTy =
1601 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1602
1603 // Get the vtable pointer.
1604 llvm::Value *VTable =
1605 CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
1606
1607 // Get the offset-to-top from the vtable.
1608 OffsetToTop =
1609 CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
1610 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1611 PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1612 }
1613 // Finally, add the offset to the pointer.
1614 return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.getPointer(),
1615 OffsetToTop);
1616}
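// Note: in both layouts the offset-to-top entry sits two slots before the
// vtable's address point (hence the -2 indices above). Conceptually the whole
// cast to void* is just
//   void *most_derived = (char *)this + *(ptrdiff_t *)vptr[-2];
// i.e. one load of the offset followed by pointer arithmetic, which lands on
// the most-derived object.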
1617
1618bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1619 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1620 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1621 Call->setDoesNotReturn();
1622 CGF.Builder.CreateUnreachable();
1623 return true;
1624}
1625
1626llvm::Value *
1627ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1628 Address This,
1629 const CXXRecordDecl *ClassDecl,
1630 const CXXRecordDecl *BaseClassDecl) {
1631 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1632 CharUnits VBaseOffsetOffset =
1633 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1634 BaseClassDecl);
1635 llvm::Value *VBaseOffsetPtr =
1636 CGF.Builder.CreateConstGEP1_64(
1637 CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
1638 "vbase.offset.ptr");
1639
1640 llvm::Value *VBaseOffset;
1641 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1642 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1643 CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
1644 "vbase.offset");
1645 } else {
1646 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1647 CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
1648 }
1649 return VBaseOffset;
1650}
1651
1652void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1653 // Just make sure we're in sync with TargetCXXABI.
1654 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1655
1656 // The constructor used for constructing this as a base class;
1657 // ignores virtual bases.
1658 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1659
1660 // The constructor used for constructing this as a complete class;
1661 // constructs the virtual bases, then calls the base constructor.
1662 if (!D->getParent()->isAbstract()) {
1663 // We don't need to emit the complete ctor if the class is abstract.
1664 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1665 }
1666}
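// As a concrete example, for `struct X { X(); };` the two variants above are
// mangled _ZN1XC1Ev (complete object, C1) and _ZN1XC2Ev (base object, C2);
// when X has no virtual bases the two bodies are typically identical and one
// may be emitted as an alias of the other.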
1667
1668CGCXXABI::AddedStructorArgCounts
1669ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1670 SmallVectorImpl<CanQualType> &ArgTys) {
1671 ASTContext &Context = getContext();
1672
1673 // All parameters are already in place except VTT, which goes after 'this'.
1674 // These are Clang types, so we don't need to worry about sret yet.
1675
1676 // Check if we need to add a VTT parameter (which has type global void **).
1677 if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1678 : GD.getDtorType() == Dtor_Base) &&
1679 cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1680 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1681 QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
1682 ArgTys.insert(ArgTys.begin() + 1,
1683 Context.getPointerType(CanQualType::CreateUnsafe(Q)));
1684 return AddedStructorArgCounts::prefix(1);
1685 }
1686 return AddedStructorArgCounts{};
1687}
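// For instance, given `struct D : virtual B { D(int); };`, the base-object
// constructor built here effectively has the C-level signature
//   void D_C2(D *this, void **vtt, int arg);   // mangled _ZN1DC2Ei
// (D_C2 is an illustrative name only): the VTT pointer is inserted
// immediately after 'this', which is what AddedStructorArgCounts::prefix(1)
// records.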
1688
1689void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1690 // The destructor used for destructing this as a base class; ignores
1691 // virtual bases.
1692 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1693
1694 // The destructor used for destructing this as a most-derived class;
1695 // calls the base destructor and then destructs any virtual bases.
1696 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1697
1698 // The destructor in a virtual table is always a 'deleting'
1699 // destructor, which calls the complete destructor and then uses the
1700 // appropriate operator delete.
1701 if (D->isVirtual())
1702 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1703}
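// For a class C these three variants correspond to the Itanium manglings
// _ZN1CD2Ev (base), _ZN1CD1Ev (complete) and _ZN1CD0Ev (deleting); only the
// deleting destructor appears in the vtable, and it behaves roughly like
//   void C_D0(C *this) { C_D1(this); operator delete(this); }
// (C_D0/C_D1 are illustrative names, not emitted symbols).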
1704
1705void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1706 QualType &ResTy,
1707 FunctionArgList &Params) {
1708 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1709 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1710
1711 // Check if we need a VTT parameter as well.
1712 if (NeedsVTTParameter(CGF.CurGD)) {
1713 ASTContext &Context = getContext();
1714
1715 // FIXME: avoid the fake decl
1716 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1717 QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
1718 QualType T = Context.getPointerType(Q);
1719 auto *VTTDecl = ImplicitParamDecl::Create(
1720 Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1721 T, ImplicitParamKind::CXXVTT);
1722 Params.insert(Params.begin() + 1, VTTDecl);
1723 getStructorImplicitParamDecl(CGF) = VTTDecl;
1724 }
1725}
1726
1727void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1728 // Naked functions have no prolog.
1729 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1730 return;
1731
1732 /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1733 /// adjustments are required, because they are all handled by thunks.
1734 setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1735
1736 /// Initialize the 'vtt' slot if needed.
1737 if (getStructorImplicitParamDecl(CGF)) {
1738 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1739 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1740 }
1741
1742 /// If this is a function that the ABI specifies returns 'this', initialize
1743 /// the return slot to 'this' at the start of the function.
1744 ///
1745 /// Unlike the setting of return types, this is done within the ABI
1746 /// implementation instead of by clients of CGCXXABI because:
1747 /// 1) getThisValue is currently protected
1748 /// 2) in theory, an ABI could implement 'this' returns some other way;
1749 /// HasThisReturn only specifies a contract, not the implementation
1750 if (HasThisReturn(CGF.CurGD))
1751 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1752}
1753
1754CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1755 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1756 bool ForVirtualBase, bool Delegating) {
1757 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1758 return AddedStructorArgs{};
1759
1760 // Insert the implicit 'vtt' argument as the second argument. Make sure to
1761 // correctly reflect its address space, which can differ from generic on
1762 // some targets.
1763 llvm::Value *VTT =
1764 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1765 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1766 QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS);
1767 QualType VTTTy = getContext().getPointerType(Q);
1768 return AddedStructorArgs::prefix({{VTT, VTTTy}});
1769}
1770
1771llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1772 CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1773 bool ForVirtualBase, bool Delegating) {
1774 GlobalDecl GD(DD, Type);
1775 return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1776}
1777
1778void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1779 const CXXDestructorDecl *DD,
1780 CXXDtorType Type, bool ForVirtualBase,
1781 bool Delegating, Address This,
1782 QualType ThisTy) {
1783 GlobalDecl GD(DD, Type);
1784 llvm::Value *VTT =
1785 getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1786 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1787
1788 CGCallee Callee;
1789 if (getContext().getLangOpts().AppleKext &&
1790 Type != Dtor_Base && DD->isVirtual())
1791 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1792 else
1793 Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1794
1795 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1796 nullptr);
1797}
1798
1799void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1800 const CXXRecordDecl *RD) {
1801 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1802 if (VTable->hasInitializer())
1803 return;
1804
1805 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1806 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1807 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1808 llvm::Constant *RTTI =
1809 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1810
1811 // Create and set the initializer.
1812 ConstantInitBuilder builder(CGM);
1813 auto components = builder.beginStruct();
1814 CGVT.createVTableInitializer(components, VTLayout, RTTI,
1815 llvm::GlobalValue::isLocalLinkage(Linkage));
1816 components.finishAndSetAsInitializer(VTable);
1817
1818 // Set the correct linkage.
1819 VTable->setLinkage(Linkage);
1820
1821 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1822 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1823
1824 // Set the right visibility.
1825 CGM.setGVProperties(VTable, RD);
1826
1827 // If this is the magic class __cxxabiv1::__fundamental_type_info,
1828 // we will emit the typeinfo for the fundamental types. This is the
1829 // same behaviour as GCC.
1830 const DeclContext *DC = RD->getDeclContext();
1831 if (RD->getIdentifier() &&
1832 RD->getIdentifier()->isStr("__fundamental_type_info") &&
1833 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1834 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1835 DC->getParent()->isTranslationUnit())
1836 EmitFundamentalRTTIDescriptors(RD);
1837
1838 // Always emit type metadata on non-available_externally definitions, and on
1839 // available_externally definitions if we are performing whole program
1840 // devirtualization. For WPD we need the type metadata on all vtable
1841 // definitions to ensure we associate derived classes with base classes
1842 // defined in headers but with a strong definition only in a shared library.
1843 if (!VTable->isDeclarationForLinker() ||
1844 CGM.getCodeGenOpts().WholeProgramVTables) {
1845 CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
1846 // For available_externally definitions, add the vtable to
1847 // @llvm.compiler.used so that it isn't deleted before whole program
1848 // analysis.
1849 if (VTable->isDeclarationForLinker()) {
1850 assert(CGM.getCodeGenOpts().WholeProgramVTables);
1851 CGM.addCompilerUsedGlobal(VTable);
1852 }
1853 }
1854
1855 if (VTContext.isRelativeLayout()) {
1856 CGVT.RemoveHwasanMetadata(VTable);
1857 if (!VTable->isDSOLocal())
1858 CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
1859 }
1860}
1861
1862bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1863 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1864 if (Vptr.NearestVBase == nullptr)
1865 return false;
1866 return NeedsVTTParameter(CGF.CurGD);
1867}
1868
1869llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1870 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1871 const CXXRecordDecl *NearestVBase) {
1872
1873 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1874 NeedsVTTParameter(CGF.CurGD)) {
1875 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1876 NearestVBase);
1877 }
1878 return getVTableAddressPoint(Base, VTableClass);
1879}
1880
1881llvm::Constant *
1882ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1883 const CXXRecordDecl *VTableClass) {
1884 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1885
1886 // Find the appropriate vtable within the vtable group, and the address point
1887 // within that vtable.
1888 VTableLayout::AddressPointLocation AddressPoint =
1889 CGM.getItaniumVTableContext()
1890 .getVTableLayout(VTableClass)
1891 .getAddressPoint(Base);
1892 llvm::Value *Indices[] = {
1893 llvm::ConstantInt::get(CGM.Int32Ty, 0),
1894 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1895 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1896 };
1897
1898 return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1899 Indices, /*InBounds=*/true,
1900 /*InRangeIndex=*/1);
1901}
1902
1903// Check whether all the non-inline virtual methods for the class have the
1904// specified attribute.
1905template <typename T>
1906static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1907 bool FoundNonInlineVirtualMethodWithAttr = false;
1908 for (const auto *D : RD->noload_decls()) {
1909 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1910 if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1911 FD->doesThisDeclarationHaveABody())
1912 continue;
1913 if (!D->hasAttr<T>())
1914 return false;
1915 FoundNonInlineVirtualMethodWithAttr = true;
1916 }
1917 }
1918
1919 // We didn't find any non-inline virtual methods missing the attribute. We
1920 // will return true when we found at least one non-inline virtual with the
1921 // attribute. (This lets our caller know that the attribute needs to be
1922 // propagated up to the vtable.)
1923 return FoundNonInlineVirtualMethodWithAttr;
1924}
1925
1926llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1927 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1928 const CXXRecordDecl *NearestVBase) {
1929 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1930 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1931
1932 // Get the secondary vpointer index.
1933 uint64_t VirtualPointerIndex =
1934 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1935
1936 /// Load the VTT.
1937 llvm::Value *VTT = CGF.LoadCXXVTT();
1938 if (VirtualPointerIndex)
1939 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy, VTT,
1940 VirtualPointerIndex);
1941
1942 // And load the address point from the VTT.
1943 return CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,
1944 CGF.getPointerAlign());
1945}
1946
1947llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1948 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1949 return getVTableAddressPoint(Base, VTableClass);
1950}
1951
1952llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1953 CharUnits VPtrOffset) {
1954 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1955
1956 llvm::GlobalVariable *&VTable = VTables[RD];
1957 if (VTable)
1958 return VTable;
1959
1960 // Queue up this vtable for possible deferred emission.
1961 CGM.addDeferredVTable(RD);
1962
1963 SmallString<256> Name;
1964 llvm::raw_svector_ostream Out(Name);
1965 getMangleContext().mangleCXXVTable(RD, Out);
1966
1967 const VTableLayout &VTLayout =
1968 CGM.getItaniumVTableContext().getVTableLayout(RD);
1969 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1970
1971 // Use pointer to global alignment for the vtable. Otherwise we would align
1972 // them based on the size of the initializer which doesn't make sense as only
1973 // single values are read.
1974 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1975 unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
1976 ? 32
1977 : CGM.getTarget().getPointerAlign(AS);
1978
1979 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1980 Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1981 getContext().toCharUnitsFromBits(PAlign).getAsAlign());
1982 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1983
1984 // In MS C++ if you have a class with virtual functions in which you are using
1985 // selective member import/export, then all virtual functions must be exported
1986 // unless they are inline, otherwise a link error will result. To match this
1987 // behavior, for such classes, we dllimport the vtable if it is defined
1988 // externally and all the non-inline virtual methods are marked dllimport, and
1989 // we dllexport the vtable if it is defined in this TU and all the non-inline
1990 // virtual methods are marked dllexport.
1991 if (CGM.getTarget().hasPS4DLLImportExport()) {
1992 if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
1993 if (CGM.getVTables().isVTableExternal(RD)) {
1994 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
1995 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1996 } else {
1997 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
1998 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1999 }
2000 }
2001 }
2002 CGM.setGVProperties(VTable, RD);
2003
2004 return VTable;
2005}
2006
2007CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
2008 GlobalDecl GD,
2009 Address This,
2010 llvm::Type *Ty,
2011 SourceLocation Loc) {
2012 llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
2013 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
2014 llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());
2015
2016 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
2017 llvm::Value *VFunc;
2018 if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
2019 VFunc = CGF.EmitVTableTypeCheckedLoad(
2020 MethodDecl->getParent(), VTable, PtrTy,
2021 VTableIndex *
2022 CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
2023 8);
2024 } else {
2025 CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
2026
2027 llvm::Value *VFuncLoad;
2028 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
2029 VFuncLoad = CGF.Builder.CreateCall(
2030 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
2031 {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
2032 } else {
2033 llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
2034 PtrTy, VTable, VTableIndex, "vfn");
2035 VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
2036 CGF.getPointerAlign());
2037 }
2038
2039 // Add !invariant.load md to the virtual function load to indicate that the
2040 // function does not change inside the vtable.
2041 // It's safe to add it without -fstrict-vtable-pointers, but it would not
2042 // help devirtualization, because it only matters when there are two loads of
2043 // the same virtual function from the same vtable load, which won't
2044 // happen unless devirtualization is enabled with -fstrict-vtable-pointers.
2045 if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
2046 CGM.getCodeGenOpts().StrictVTablePointers) {
2047 if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
2048 VFuncLoadInstr->setMetadata(
2049 llvm::LLVMContext::MD_invariant_load,
2050 llvm::MDNode::get(CGM.getLLVMContext(),
2051 llvm::ArrayRef<llvm::Metadata *>()));
2052 }
2053 }
2054 VFunc = VFuncLoad;
2055 }
2056
2057 CGCallee Callee(GD, VFunc);
2058 return Callee;
2059}
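// Under the relative vtable layout each slot holds a 32-bit offset rather
// than a pointer, so the virtual call above loads the callee with,
// schematically,
//   %vfn = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4 * VTableIndex)
// instead of an aligned pointer-sized load from the slot.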
2060
2061llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
2062 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
2063 Address This, DeleteOrMemberCallExpr E) {
2064 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2065 auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2066 assert((CE != nullptr) ^ (D != nullptr));
2067 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2068 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2069
2070 GlobalDecl GD(Dtor, DtorType);
2071 const CGFunctionInfo *FInfo =
2072 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2073 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2074 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2075
2076 QualType ThisTy;
2077 if (CE) {
2078 ThisTy = CE->getObjectType();
2079 } else {
2080 ThisTy = D->getDestroyedType();
2081 }
2082
2083 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
2084 QualType(), nullptr);
2085 return nullptr;
2086}
2087
2088void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2089 CodeGenVTables &VTables = CGM.getVTables();
2090 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2091 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2092}
2093
2094bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2095 const CXXRecordDecl *RD) const {
2096 // We don't emit available_externally vtables if we are in -fapple-kext mode
2097 // because kext mode does not permit devirtualization.
2098 if (CGM.getLangOpts().AppleKext)
2099 return false;
2100
2101 // If the vtable is hidden then it is not safe to emit an available_externally
2102 // copy of vtable.
2103 if (isVTableHidden(RD))
2104 return false;
2105
2106 if (CGM.getCodeGenOpts().ForceEmitVTables)
2107 return true;
2108
2109 // If there is no inline virtual function that has not yet been emitted, then
2110 // it is safe to emit an available_externally copy of the vtable.
2111 // FIXME: we can still emit a copy of the vtable if we
2112 // can emit definitions of the inline functions.
2113 if (hasAnyUnusedVirtualInlineFunction(RD))
2114 return false;
2115
2116 // For a class with virtual bases, we must also be able to speculatively
2117 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2118 // the vtable" and "can emit the VTT". For a base subobject, this means we
2119 // need to be able to emit non-virtual base vtables.
2120 if (RD->getNumVBases()) {
2121 for (const auto &B : RD->bases()) {
2122 auto *BRD = B.getType()->getAsCXXRecordDecl();
2123 assert(BRD && "no class for base specifier");
2124 if (B.isVirtual() || !BRD->isDynamicClass())
2125 continue;
2126 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2127 return false;
2128 }
2129 }
2130
2131 return true;
2132}
2133
2134bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2135 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2136 return false;
2137
2138 // For a complete-object vtable (or more specifically, for the VTT), we need
2139 // to be able to speculatively emit the vtables of all dynamic virtual bases.
2140 for (const auto &B : RD->vbases()) {
2141 auto *BRD = B.getType()->getAsCXXRecordDecl();
2142 assert(BRD && "no class for base specifier");
2143 if (!BRD->isDynamicClass())
2144 continue;
2145 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2146 return false;
2147 }
2148
2149 return true;
2150}
2151static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2152 Address InitialPtr,
2153 int64_t NonVirtualAdjustment,
2154 int64_t VirtualAdjustment,
2155 bool IsReturnAdjustment) {
2156 if (!NonVirtualAdjustment && !VirtualAdjustment)
2157 return InitialPtr.getPointer();
2158
2159 Address V = InitialPtr.withElementType(CGF.Int8Ty);
2160
2161 // In a base-to-derived cast, the non-virtual adjustment is applied first.
2162 if (NonVirtualAdjustment && !IsReturnAdjustment) {
2163 V = CGF.Builder.CreateConstInBoundsByteGEP(V,
2164 CharUnits::fromQuantity(NonVirtualAdjustment));
2165 }
2166
2167 // Perform the virtual adjustment if we have one.
2168 llvm::Value *ResultPtr;
2169 if (VirtualAdjustment) {
2170 Address VTablePtrPtr = V.withElementType(CGF.Int8PtrTy);
2171 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
2172
2173 llvm::Value *Offset;
2174 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
2175 CGF.Int8Ty, VTablePtr, VirtualAdjustment);
2176 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
2177 // Load the adjustment offset from the vtable as a 32-bit int.
2178 Offset =
2179 CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
2180 CharUnits::fromQuantity(4));
2181 } else {
2182 llvm::Type *PtrDiffTy =
2183 CGF.ConvertType(CGF.getContext().getPointerDiffType());
2184
2185 // Load the adjustment offset from the vtable.
2186 Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
2187 CGF.getPointerAlign());
2188 }
2189 // Adjust our pointer.
2190 ResultPtr = CGF.Builder.CreateInBoundsGEP(
2191 V.getElementType(), V.getPointer(), Offset);
2192 } else {
2193 ResultPtr = V.getPointer();
2194 }
2195
2196 // In a derived-to-base conversion, the non-virtual adjustment is
2197 // applied second.
2198 if (NonVirtualAdjustment && IsReturnAdjustment) {
2199 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
2200 NonVirtualAdjustment);
2201 }
2202
2203 return ResultPtr;
2204}
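// Summarised as a formula, with NV = NonVirtualAdjustment and
// VA = VirtualAdjustment:
//   'this' adjustment:  q = p + NV;  result = q + *(vptr(q) + VA)
//   return adjustment:  q = p + *(vptr(p) + VA);  result = q + NV
// which matches the ordering described by the comments inside the function.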
2205
2206llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2207 Address This,
2208 const ThisAdjustment &TA) {
2209 return performTypeAdjustment(CGF, This, TA.NonVirtual,
2210 TA.Virtual.Itanium.VCallOffsetOffset,
2211 /*IsReturnAdjustment=*/false);
2212}
2213
2214llvm::Value *
2215ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2216 const ReturnAdjustment &RA) {
2217 return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2218 RA.Virtual.Itanium.VBaseOffsetOffset,
2219 /*IsReturnAdjustment=*/true);
2220}
2221
2222void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2223 RValue RV, QualType ResultType) {
2224 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2225 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2226
2227 // Destructor thunks in the ARM ABI have indeterminate results.
2228 llvm::Type *T = CGF.ReturnValue.getElementType();
2229 RValue Undef = RValue::get(llvm::UndefValue::get(T));
2230 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2231}
2232
2233/************************** Array allocation cookies **************************/
2234
2235CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2236 // The array cookie is a size_t; pad that up to the element alignment.
2237 // The cookie is actually right-justified in that space.
2238 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2239 CGM.getContext().getPreferredTypeAlignInChars(elementType));
2240}
2241
2242Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2243 Address NewPtr,
2244 llvm::Value *NumElements,
2245 const CXXNewExpr *expr,
2246 QualType ElementType) {
2247 assert(requiresArrayCookie(expr));
2248
2249 unsigned AS = NewPtr.getAddressSpace();
2250
2251 ASTContext &Ctx = getContext();
2252 CharUnits SizeSize = CGF.getSizeSize();
2253
2254 // The size of the cookie.
2255 CharUnits CookieSize =
2256 std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
2257 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2258
2259 // Compute an offset to the cookie.
2260 Address CookiePtr = NewPtr;
2261 CharUnits CookieOffset = CookieSize - SizeSize;
2262 if (!CookieOffset.isZero())
2263 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2264
2265 // Write the number of elements into the appropriate slot.
2266 Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy);
2267 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2268
2269 // Handle the array cookie specially in ASan.
2270 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2271 (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2272 CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2273 // The store to the CookiePtr does not need to be instrumented.
2274 SI->setNoSanitizeMetadata();
2275 llvm::FunctionType *FTy =
2276 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2277 llvm::FunctionCallee F =
2278 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2279 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
2280 }
2281
2282 // Finally, compute a pointer to the actual data buffer by skipping
2283 // over the cookie completely.
2284 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2285}
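// Illustrative layout for `p = new T[n]` when a cookie is required, assuming
// an 8-byte size_t and alignof(T) <= 8:
//   allocation: [ padding | size_t n ][ T[0] | T[1] | ... ]
//                                      ^-- pointer returned to the program
// readArrayCookieImpl below recovers n by stepping back sizeof(size_t) from
// that pointer.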
2286
2287llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2288 Address allocPtr,
2289 CharUnits cookieSize) {
2290 // The element size is right-justified in the cookie.
2291 Address numElementsPtr = allocPtr;
2292 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2293 if (!numElementsOffset.isZero())
2294 numElementsPtr =
2295 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2296
2297 unsigned AS = allocPtr.getAddressSpace();
2298 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
2299 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2300 return CGF.Builder.CreateLoad(numElementsPtr);
2301 // In asan mode emit a function call instead of a regular load and let the
2302 // run-time deal with it: if the shadow is properly poisoned return the
2303 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2304 // We can't simply ignore this load using nosanitize metadata because
2305 // the metadata may be lost.
2306 llvm::FunctionType *FTy =
2307 llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
2308 llvm::FunctionCallee F =
2309 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2310 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2311}
2312
2313CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2314 // ARM says that the cookie is always:
2315 // struct array_cookie {
2316 // std::size_t element_size; // element_size != 0
2317 // std::size_t element_count;
2318 // };
2319 // But the base ABI doesn't give anything an alignment greater than
2320 // 8, so we can dismiss this as typical ABI-author blindness to
2321 // actual language complexity and round up to the element alignment.
2322 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2323 CGM.getContext().getTypeAlignInChars(elementType));
2324}
2325
2326Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2327 Address newPtr,
2328 llvm::Value *numElements,
2329 const CXXNewExpr *expr,
2330 QualType elementType) {
2331 assert(requiresArrayCookie(expr));
2332
2333 // The cookie is always at the start of the buffer.
2334 Address cookie = newPtr;
2335
2336 // The first element is the element size.
2337 cookie = cookie.withElementType(CGF.SizeTy);
2338 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2339 getContext().getTypeSizeInChars(elementType).getQuantity());
2340 CGF.Builder.CreateStore(elementSize, cookie);
2341
2342 // The second element is the element count.
2343 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2344 CGF.Builder.CreateStore(numElements, cookie);
2345
2346 // Finally, compute a pointer to the actual data buffer by skipping
2347 // over the cookie completely.
2348 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2349 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2350}
2351
2352llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2353 Address allocPtr,
2354 CharUnits cookieSize) {
2355 // The number of elements is at offset sizeof(size_t) relative to
2356 // the allocated pointer.
2357 Address numElementsPtr
2358 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2359
2360 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
2361 return CGF.Builder.CreateLoad(numElementsPtr);
2362}
2363
2364/*********************** Static local initialization **************************/
2365
2366static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2367 llvm::PointerType *GuardPtrTy) {
2368 // int __cxa_guard_acquire(__guard *guard_object);
2369 llvm::FunctionType *FTy =
2370 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2371 GuardPtrTy, /*isVarArg=*/false);
2372 return CGM.CreateRuntimeFunction(
2373 FTy, "__cxa_guard_acquire",
2374 llvm::AttributeList::get(CGM.getLLVMContext(),
2375 llvm::AttributeList::FunctionIndex,
2376 llvm::Attribute::NoUnwind));
2377}
2378
2379static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2380 llvm::PointerType *GuardPtrTy) {
2381 // void __cxa_guard_release(__guard *guard_object);
2382 llvm::FunctionType *FTy =
2383 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2384 return CGM.CreateRuntimeFunction(
2385 FTy, "__cxa_guard_release",
2386 llvm::AttributeList::get(CGM.getLLVMContext(),
2387 llvm::AttributeList::FunctionIndex,
2388 llvm::Attribute::NoUnwind));
2389}
2390
2391static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2392 llvm::PointerType *GuardPtrTy) {
2393 // void __cxa_guard_abort(__guard *guard_object);
2394 llvm::FunctionType *FTy =
2395 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2396 return CGM.CreateRuntimeFunction(
2397 FTy, "__cxa_guard_abort",
2398 llvm::AttributeList::get(CGM.getLLVMContext(),
2399 llvm::AttributeList::FunctionIndex,
2400 llvm::Attribute::NoUnwind));
2401}
2402
2403namespace {
2404 struct CallGuardAbort final : EHScopeStack::Cleanup {
2405 llvm::GlobalVariable *Guard;
2406 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2407
2408 void Emit(CodeGenFunction &CGF, Flags flags) override {
2409 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2410 Guard);
2411 }
2412 };
2413}
2414
2415/// The ARM code here follows the Itanium code closely enough that we
2416/// just special-case it at particular places.
2417void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2418 const VarDecl &D,
2419 llvm::GlobalVariable *var,
2420 bool shouldPerformInit) {
2421 CGBuilderTy &Builder = CGF.Builder;
2422
2423 // Inline variables that weren't instantiated from variable templates have
2424 // partially-ordered initialization within their translation unit.
2425 bool NonTemplateInline =
2426 D.isInline() &&
2427 !isTemplateInstantiation(D.getTemplateSpecializationKind());
2428
2429 // We only need to use thread-safe statics for local non-TLS variables and
2430 // inline variables; other global initialization is always single-threaded
2431 // or (through lazy dynamic loading in multiple threads) unsequenced.
2432 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2433 (D.isLocalVarDecl() || NonTemplateInline) &&
2434 !D.getTLSKind();
2435
2436 // If we have a global variable with internal linkage and thread-safe statics
2437 // are disabled, we can just let the guard variable be of type i8.
2438 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2439
2440 llvm::IntegerType *guardTy;
2441 CharUnits guardAlignment;
2442 if (useInt8GuardVariable) {
2443 guardTy = CGF.Int8Ty;
2444 guardAlignment = CharUnits::One();
2445 } else {
2446 // Guard variables are 64 bits in the generic ABI and size width on ARM
2447 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2448 if (UseARMGuardVarABI) {
2449 guardTy = CGF.SizeTy;
2450 guardAlignment = CGF.getSizeAlign();
2451 } else {
2452 guardTy = CGF.Int64Ty;
2453 guardAlignment =
2454 CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
2455 }
2456 }
2457 llvm::PointerType *guardPtrTy = llvm::PointerType::get(
2458 CGF.CGM.getLLVMContext(),
2459 CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
2460
2461 // Create the guard variable if we don't already have it (as we
2462 // might if we're double-emitting this function body).
2463 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2464 if (!guard) {
2465 // Mangle the name for the guard.
2466 SmallString<256> guardName;
2467 {
2468 llvm::raw_svector_ostream out(guardName);
2469 getMangleContext().mangleStaticGuardVariable(&D, out);
2470 }
2471
2472 // Create the guard variable with a zero-initializer.
2473 // Just absorb linkage, visibility and dll storage class from the guarded
2474 // variable.
2475 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2476 false, var->getLinkage(),
2477 llvm::ConstantInt::get(guardTy, 0),
2478 guardName.str());
2479 guard->setDSOLocal(var->isDSOLocal());
2480 guard->setVisibility(var->getVisibility());
2481 guard->setDLLStorageClass(var->getDLLStorageClass());
2482 // If the variable is thread-local, so is its guard variable.
2483 guard->setThreadLocalMode(var->getThreadLocalMode());
2484 guard->setAlignment(guardAlignment.getAsAlign());
2485
2486 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2487 // group as the associated data object." In practice, this doesn't work for
2488 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2489 llvm::Comdat *C = var->getComdat();
2490 if (!D.isLocalVarDecl() && C &&
2491 (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2492 CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2493 guard->setComdat(C);
2494 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2495 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2496 }
2497
2498 CGM.setStaticLocalDeclGuardAddress(&D, guard);
2499 }
2500
2501 Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
2502
2503 // Test whether the variable has completed initialization.
2504 //
2505 // Itanium C++ ABI 3.3.2:
2506 // The following is pseudo-code showing how these functions can be used:
2507 // if (obj_guard.first_byte == 0) {
2508 // if ( __cxa_guard_acquire (&obj_guard) ) {
2509 // try {
2510 // ... initialize the object ...;
2511 // } catch (...) {
2512 // __cxa_guard_abort (&obj_guard);
2513 // throw;
2514 // }
2515 // ... queue object destructor with __cxa_atexit() ...;
2516 // __cxa_guard_release (&obj_guard);
2517 // }
2518 // }
2519 //
2520 // If threadsafe statics are enabled, but we don't have inline atomics, just
2521 // call __cxa_guard_acquire unconditionally. The "inline" check isn't
2522 // actually inline, and the user might not expect calls to __atomic libcalls.
2523
2524 unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
2525 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2526 if (!threadsafe || MaxInlineWidthInBits) {
2527 // Load the first byte of the guard variable.
2528 llvm::LoadInst *LI =
2529 Builder.CreateLoad(guardAddr.withElementType(CGM.Int8Ty));
2530
2531 // Itanium ABI:
2532 // An implementation supporting thread-safety on multiprocessor
2533 // systems must also guarantee that references to the initialized
2534 // object do not occur before the load of the initialization flag.
2535 //
2536 // In LLVM, we do this by marking the load Acquire.
2537 if (threadsafe)
2538 LI->setAtomic(llvm::AtomicOrdering::Acquire);
2539
2540 // For ARM, we should only check the first bit, rather than the entire byte:
2541 //
2542 // ARM C++ ABI 3.2.3.1:
2543 // To support the potential use of initialization guard variables
2544 // as semaphores that are the target of ARM SWP and LDREX/STREX
2545 // synchronizing instructions we define a static initialization
2546 // guard variable to be a 4-byte aligned, 4-byte word with the
2547 // following inline access protocol.
2548 // #define INITIALIZED 1
2549 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2550 // if (__cxa_guard_acquire(&obj_guard))
2551 // ...
2552 // }
2553 //
2554 // and similarly for ARM64:
2555 //
2556 // ARM64 C++ ABI 3.2.2:
2557 // This ABI instead only specifies the value bit 0 of the static guard
2558 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2559 // variable is not initialized and 1 when it is.
2560 llvm::Value *V =
2561 (UseARMGuardVarABI && !useInt8GuardVariable)
2562 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2563 : LI;
2564 llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2565
2566 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2567
2568 // Check if the first byte of the guard variable is zero.
2569 CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2570 CodeGenFunction::GuardKind::VariableGuard, &D);
2571
2572 CGF.EmitBlock(InitCheckBlock);
2573 }
2574
2575 // The semantics of dynamic initialization of variables with static or thread
2576 // storage duration depends on whether they are declared at block-scope. The
2577 // initialization of such variables at block-scope can be aborted with an
2578 // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
2579 // to their initialization has undefined behavior (also per C++20
2580 // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
2581 // lead to termination (per C++20 [except.terminate]p1), and recursive
2582 // references to the variables are governed only by the lifetime rules (per
2583 // C++20 [class.cdtor]p2), which means such references are perfectly fine as
2584 // long as they avoid touching memory. As a result, block-scope variables must
2585 // not be marked as initialized until after initialization completes (unless
2586 // the mark is reverted following an exception), but non-block-scope variables
2587 // must be marked prior to initialization so that recursive accesses during
2588 // initialization do not restart initialization.
2589
2590 // Variables used when coping with thread-safe statics and exceptions.
2591 if (threadsafe) {
2592 // Call __cxa_guard_acquire.
2593 llvm::Value *V
2594 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2595
2596 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2597
2598 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2599 InitBlock, EndBlock);
2600
2601 // Call __cxa_guard_abort along the exceptional edge.
2602 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2603
2604 CGF.EmitBlock(InitBlock);
2605 } else if (!D.isLocalVarDecl()) {
2606 // For non-local variables, store 1 into the first byte of the guard
2607 // variable before the object initialization begins so that references
2608 // to the variable during initialization don't restart initialization.
2609 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2610 guardAddr.withElementType(CGM.Int8Ty));
2611 }
2612
2613 // Emit the initializer and add a global destructor if appropriate.
2614 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2615
2616 if (threadsafe) {
2617 // Pop the guard-abort cleanup if we pushed one.
2618 CGF.PopCleanupBlock();
2619
2620 // Call __cxa_guard_release. This cannot throw.
2621 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2622 guardAddr.getPointer());
2623 } else if (D.isLocalVarDecl()) {
2624 // For local variables, store 1 into the first byte of the guard variable
2625 // after the object initialization completes so that initialization is
2626 // retried if initialization is interrupted by an exception.
2627 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2628 guardAddr.withElementType(CGM.Int8Ty));
2629 }
2630
2631 CGF.EmitBlock(EndBlock);
2632}
2633
2634/// Register a global destructor using __cxa_atexit.
2635static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2636 llvm::FunctionCallee dtor,
2637 llvm::Constant *addr, bool TLS) {
2638 assert(!CGF.getTarget().getTriple().isOSAIX() &&
2639 "unexpected call to emitGlobalDtorWithCXAAtExit");
2640 assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2641 "__cxa_atexit is disabled");
2642 const char *Name = "__cxa_atexit";
2643 if (TLS) {
2644 const llvm::Triple &T = CGF.getTarget().getTriple();
2645 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2646 }
2647
2648 // We're assuming that the destructor function is something we can
2649 // reasonably call with the default CC.
2650 llvm::Type *dtorTy = CGF.UnqualPtrTy;
2651
2652 // Preserve address space of addr.
2653 auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2654 auto AddrPtrTy = AddrAS ? llvm::PointerType::get(CGF.getLLVMContext(), AddrAS)
2655 : CGF.Int8PtrTy;
2656
2657 // Create a variable that binds the atexit to this shared object.
2658 llvm::Constant *handle =
2659 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2660 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2661 GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2662
2663 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2664 llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
2665 llvm::FunctionType *atexitTy =
2666 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2667
2668 // Fetch the actual function.
2669 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2670 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2671 fn->setDoesNotThrow();
2672
2673 if (!addr)
2674 // addr is null when we are trying to register a dtor annotated with
2675 // __attribute__((destructor)) in a constructor function. Using null here is
2676 // okay because this argument is just passed back to the destructor
2677 // function.
2678 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2679
2680 llvm::Value *args[] = {dtor.getCallee(), addr, handle};
2681 CGF.EmitNounwindRuntimeCall(atexit, args);
2682}
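// As a sketch, registering the destructor of a global `Widget w;` amounts to
//   __cxa_atexit((void (*)(void *))dtor, &w, &__dso_handle);
// with __cxa_thread_atexit (or _tlv_atexit on Darwin) used instead for
// thread_local variables, exactly as selected above.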
2683
2684static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2685 StringRef FnName) {
2686 // Create a function that registers/unregisters destructors that have the same
2687 // priority.
2688 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2689 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2690 FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2691
2692 return GlobalInitOrCleanupFn;
2693}
2694
2695void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
2696 for (const auto &I : DtorsUsingAtExit) {
2697 int Priority = I.first;
2698 std::string GlobalCleanupFnName =
2699 std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);
2700
2701 llvm::Function *GlobalCleanupFn =
2702 createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);
2703
2704 CodeGenFunction CGF(*this);
2705 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
2706 getTypes().arrangeNullaryFunction(), FunctionArgList(),
2707 SourceLocation(), SourceLocation());
2709
2710 // Get the destructor function type, void(*)(void).
2711 llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
2712
2713 // Destructor functions are run/unregistered in non-ascending
2714 // order of their priorities.
2715 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2716 auto itv = Dtors.rbegin();
2717 while (itv != Dtors.rend()) {
2718 llvm::Function *Dtor = *itv;
2719
2720 // We're assuming that the destructor function is something we can
2721 // reasonably call with the correct CC.
2722 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor);
2723 llvm::Value *NeedsDestruct =
2724 CGF.Builder.CreateIsNull(V, "needs_destruct");
2725
2726 llvm::BasicBlock *DestructCallBlock =
2727 CGF.createBasicBlock("destruct.call");
2728 llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
2729 (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
2730 // Check if unatexit returns a value of 0. If it does, jump to
2731 // DestructCallBlock, otherwise jump to EndBlock directly.
2732 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
2733
2734 CGF.EmitBlock(DestructCallBlock);
2735
2736 // Emit the call to casted Dtor.
2737 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor);
2738 // Make sure the call and the callee agree on calling convention.
2739 CI->setCallingConv(Dtor->getCallingConv());
2740
2741 CGF.EmitBlock(EndBlock);
2742
2743 itv++;
2744 }
2745
2746 CGF.FinishFunction();
2747 AddGlobalDtor(GlobalCleanupFn, Priority);
2748 }
2749}
2750
2751void CodeGenModule::registerGlobalDtorsWithAtExit() {
2752 for (const auto &I : DtorsUsingAtExit) {
2753 int Priority = I.first;
2754 std::string GlobalInitFnName =
2755 std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2756 llvm::Function *GlobalInitFn =
2757 createGlobalInitOrCleanupFn(*this, GlobalInitFnName);
2758
2759 CodeGenFunction CGF(*this);
2760 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
2761 getTypes().arrangeNullaryFunction(), FunctionArgList(),
2762 SourceLocation(), SourceLocation());
2764
2765 // Since constructor functions are run in non-descending order of their
2766 // priorities, destructors are registered in non-descending order of their
2767 // priorities, and since destructor functions are run in the reverse order
2768 // of their registration, destructor functions are run in non-ascending
2769 // order of their priorities.
2770 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2771 for (auto *Dtor : Dtors) {
2772 // Register the destructor function calling __cxa_atexit if it is
2773 // available. Otherwise fall back on calling atexit.
2774 if (getCodeGenOpts().CXAAtExit) {
2775 emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2776 } else {
2777 // We're assuming that the destructor function is something we can
2778 // reasonably call with the correct CC.
2779 CGF.registerGlobalDtorWithAtExit(Dtor);
2780 }
2781 }
2782
2783 CGF.FinishFunction();
2784 AddGlobalCtor(GlobalInitFn, Priority);
2785 }
2786
2787 if (getCXXABI().useSinitAndSterm())
2788 unregisterGlobalDtorsWithUnAtExit();
2789}
2790
2791/// Register a global destructor as best as we know how.
2792void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2793 llvm::FunctionCallee dtor,
2794 llvm::Constant *addr) {
2795 if (D.isNoDestroy(CGM.getContext()))
2796 return;
2797
2798 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2799 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2800 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2801 // We can always use __cxa_thread_atexit.
2802 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2803 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2804
2805 // In Apple kexts, we want to add a global destructor entry.
2806 // FIXME: shouldn't this be guarded by some variable?
2807 if (CGM.getLangOpts().AppleKext) {
2808 // Generate a global destructor entry.
2809 return CGM.AddCXXDtorEntry(dtor, addr);
2810 }
2811
2812 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2813}
2814
2815static bool isThreadWrapperReplaceable(const VarDecl *VD,
2816 CodeGen::CodeGenModule &CGM) {
2817 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2818 // Darwin prefers references to thread local variables to go through
2819 // the thread wrapper instead of directly referencing the backing variable.
2820 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2821 CGM.getTarget().getTriple().isOSDarwin();
2822}
2823
2824/// Get the appropriate linkage for the wrapper function. This is essentially
2825/// the weak form of the variable's linkage; every translation unit which needs
2826/// the wrapper emits a copy, and we want the linker to merge them.
2827static llvm::GlobalValue::LinkageTypes
2828getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2829 llvm::GlobalValue::LinkageTypes VarLinkage =
2830 CGM.getLLVMLinkageVarDefinition(VD);
2831
2832 // For internal linkage variables, we don't need an external or weak wrapper.
2833 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2834 return VarLinkage;
2835
2836 // If the thread wrapper is replaceable, give it appropriate linkage.
2837 if (isThreadWrapperReplaceable(VD, CGM))
2838 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2839 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2840 return VarLinkage;
2841 return llvm::GlobalValue::WeakODRLinkage;
2842}
2843
2844llvm::Function *
2845ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2846 llvm::Value *Val) {
2847 // Mangle the name for the thread_local wrapper function.
2848 SmallString<256> WrapperName;
2849 {
2850 llvm::raw_svector_ostream Out(WrapperName);
2851 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2852 }
2853
2854 // FIXME: If VD is a definition, we should regenerate the function attributes
2855 // before returning.
2856 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2857 return cast<llvm::Function>(V);
2858
2859 QualType RetQT = VD->getType();
2860 if (RetQT->isReferenceType())
2861 RetQT = RetQT.getNonReferenceType();
2862
2863 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2864 getContext().getPointerType(RetQT), FunctionArgList());
2865
2866 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2867 llvm::Function *Wrapper =
2868 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2869 WrapperName.str(), &CGM.getModule());
2870
2871 if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
2872 Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
2873
2874 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);
2875
2876 // Always resolve references to the wrapper at link time.
2877 if (!Wrapper->hasLocalLinkage())
2878 if (!isThreadWrapperReplaceable(VD, CGM) ||
2879 llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2880 llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2881 VD->getVisibility() == HiddenVisibility)
2882 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2883
2884 if (isThreadWrapperReplaceable(VD, CGM)) {
2885 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2886 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2887 }
2888
2889 ThreadWrappers.push_back({VD, Wrapper});
2890 return Wrapper;
2891}
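// Sketch of how the wrapper is used: for `extern thread_local int x;` an
// access such as `int &r = x;` is lowered to a call to the wrapper _ZTW1x,
// whose body (emitted in EmitThreadLocalInitFuncs below) calls the init
// function _ZTH1x if one exists and then returns the address of x.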
2892
2893void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2894 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2895 ArrayRef<llvm::Function *> CXXThreadLocalInits,
2896 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2897 llvm::Function *InitFunc = nullptr;
2898
2899 // Separate initializers into those with ordered (or partially-ordered)
2900 // initialization and those with unordered initialization.
2901 llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2902 llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2903 for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2904 if (isTemplateInstantiation(
2905 CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2906 UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2907 CXXThreadLocalInits[I];
2908 else
2909 OrderedInits.push_back(CXXThreadLocalInits[I]);
2910 }
2911
2912 if (!OrderedInits.empty()) {
2913 // Generate a guarded initialization function.
2914 llvm::FunctionType *FTy =
2915 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2916 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2917 InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
2918 SourceLocation(),
2919 /*TLS=*/true);
2920 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2921 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2922 llvm::GlobalVariable::InternalLinkage,
2923 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2924 Guard->setThreadLocal(true);
2925 Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
2926
2927 CharUnits GuardAlign = CharUnits::One();
2928 Guard->setAlignment(GuardAlign.getAsAlign());
2929
2930 CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
2931 InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
2932 // On Darwin platforms, use CXX_FAST_TLS calling convention.
2933 if (CGM.getTarget().getTriple().isOSDarwin()) {
2934 InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2935 InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2936 }
2937 }
2938
2939 // Create declarations for thread wrappers for all thread-local variables
2940 // with non-discardable definitions in this translation unit.
2941 for (const VarDecl *VD : CXXThreadLocals) {
2942 if (VD->hasDefinition() &&
2943 !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
2944 llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
2945 getOrCreateThreadLocalWrapper(VD, GV);
2946 }
2947 }
2948
2949 // Emit all referenced thread wrappers.
2950 for (auto VDAndWrapper : ThreadWrappers) {
2951 const VarDecl *VD = VDAndWrapper.first;
2952 llvm::GlobalVariable *Var =
2953 cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2954 llvm::Function *Wrapper = VDAndWrapper.second;
2955
2956 // Some targets require that all access to thread local variables go through
2957 // the thread wrapper. This means that we cannot attempt to create a thread
2958 // wrapper or a thread helper.
2959 if (!VD->hasDefinition()) {
2960 if (isThreadWrapperReplaceable(VD, CGM)) {
2961 Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2962 continue;
2963 }
2964
2965 // If this isn't a TU in which this variable is defined, the thread
2966 // wrapper is discardable.
2967 if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
2968 Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
2969 }
2970
2971 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2972
2973 // Mangle the name for the thread_local initialization function.
2974 SmallString<256> InitFnName;
2975 {
2976 llvm::raw_svector_ostream Out(InitFnName);
2977 getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2978 }
2979
2980 llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2981
2982 // If we have a definition for the variable, emit the initialization
2983 // function as an alias to the global Init function (if any). Otherwise,
2984 // produce a declaration of the initialization function.
2985 llvm::GlobalValue *Init = nullptr;
2986 bool InitIsInitFunc = false;
2987 bool HasConstantInitialization = false;
2988 if (!usesThreadWrapperFunction(VD)) {
2989 HasConstantInitialization = true;
2990 } else if (VD->hasDefinition()) {
2991 InitIsInitFunc = true;
2992 llvm::Function *InitFuncToUse = InitFunc;
2993      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2994        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2995 if (InitFuncToUse)
2996 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2997 InitFuncToUse);
2998 } else {
2999 // Emit a weak global function referring to the initialization function.
3000 // This function will not exist if the TU defining the thread_local
3001 // variable in question does not need any dynamic initialization for
3002 // its thread_local variables.
3003 Init = llvm::Function::Create(InitFnTy,
3004 llvm::GlobalVariable::ExternalWeakLinkage,
3005 InitFnName.str(), &CGM.getModule());
3006      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
3007      CGM.SetLLVMFunctionAttributes(
3008          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
3009 }
3010
3011 if (Init) {
3012 Init->setVisibility(Var->getVisibility());
3013 // Don't mark an extern_weak function DSO local on windows.
3014 if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
3015 Init->setDSOLocal(Var->isDSOLocal());
3016 }
3017
3018 llvm::LLVMContext &Context = CGM.getModule().getContext();
3019
3020 // The linker on AIX is not happy with missing weak symbols. However,
3021 // other TUs will not know whether the initialization routine exists
3022    // so create an empty init function to satisfy the linker.
3023 // This is needed whenever a thread wrapper function is not used, and
3024 // also when the symbol is weak.
3025 if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
3026 isEmittedWithConstantInitializer(VD, true) &&
3027 !mayNeedDestruction(VD)) {
3028 // Init should be null. If it were non-null, then the logic above would
3029 // either be defining the function to be an alias or declaring the
3030 // function with the expectation that the definition of the variable
3031 // is elsewhere.
3032 assert(Init == nullptr && "Expected Init to be null.");
3033
3034 llvm::Function *Func = llvm::Function::Create(
3035 InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
3036      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
3037      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
3038                                    cast<llvm::Function>(Func),
3039                                    /*IsThunk=*/false);
3040 // Create a function body that just returns
3041 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
3042 CGBuilderTy Builder(CGM, Entry);
3043 Builder.CreateRetVoid();
3044 }
3045
3046 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
3047 CGBuilderTy Builder(CGM, Entry);
3048 if (HasConstantInitialization) {
3049 // No dynamic initialization to invoke.
3050 } else if (InitIsInitFunc) {
3051 if (Init) {
3052 llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
3053 if (isThreadWrapperReplaceable(VD, CGM)) {
3054 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3055 llvm::Function *Fn =
3056 cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
3057 Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3058 }
3059 }
3060 } else if (CGM.getTriple().isOSAIX()) {
3061      // On AIX, except if constinit and also neither of class type nor of
3062 // (possibly multi-dimensional) array of class type, thread_local vars
3063 // will have init routines regardless of whether they are
3064 // const-initialized. Since the routine is guaranteed to exist, we can
3065      // unconditionally call it without testing for its existence. This
3066 // avoids potentially unresolved weak symbols which the AIX linker
3067 // isn't happy with.
3068 Builder.CreateCall(InitFnTy, Init);
3069 } else {
3070 // Don't know whether we have an init function. Call it if it exists.
3071 llvm::Value *Have = Builder.CreateIsNotNull(Init);
3072 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3073 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3074 Builder.CreateCondBr(Have, InitBB, ExitBB);
3075
3076 Builder.SetInsertPoint(InitBB);
3077 Builder.CreateCall(InitFnTy, Init);
3078 Builder.CreateBr(ExitBB);
3079
3080 Builder.SetInsertPoint(ExitBB);
3081 }
3082
3083 // For a reference, the result of the wrapper function is a pointer to
3084 // the referenced object.
3085 llvm::Value *Val = Builder.CreateThreadLocalAddress(Var);
3086
3087 if (VD->getType()->isReferenceType()) {
3088 CharUnits Align = CGM.getContext().getDeclAlign(VD);
3089 Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align);
3090 }
3091 if (Val->getType() != Wrapper->getReturnType())
3092 Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
3093 Val, Wrapper->getReturnType(), "");
3094
3095 Builder.CreateRet(Val);
3096 }
3097}
3098
3099LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3100 const VarDecl *VD,
3101 QualType LValType) {
3102 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3103 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3104
3105 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3106 CallVal->setCallingConv(Wrapper->getCallingConv());
3107
3108 LValue LV;
3109 if (VD->getType()->isReferenceType())
3110 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3111 else
3112 LV = CGF.MakeAddrLValue(CallVal, LValType,
3113 CGF.getContext().getDeclAlign(VD));
3114 // FIXME: need setObjCGCLValueClass?
3115 return LV;
3116}
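
Editor's note: a minimal sketch (not part of this file; the type and variable names are hypothetical) of the source pattern the wrappers above implement. Every use of a dynamically initialized thread_local is routed through its thread wrapper, which runs the guarded __tls_init before returning the variable's address for the current thread.

struct Logger { Logger(); int fd; };   // hypothetical type with a dynamic constructor
thread_local Logger tls_logger;        // requires ordered dynamic initialization

int current_fd() {
  // Lowered as a call to the thread wrapper for tls_logger, which calls
  // __tls_init if the per-thread __tls_guard byte is still zero and then
  // returns the address of this thread's copy of tls_logger.
  return tls_logger.fd;
}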
3117
3118/// Return whether the given global decl needs a VTT parameter, which it does
3119/// if it's a base constructor or destructor with virtual bases.
3120bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3121 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3122
3123 // We don't have any virtual bases, just return early.
3124 if (!MD->getParent()->getNumVBases())
3125 return false;
3126
3127 // Check if we have a base constructor.
3128 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3129 return true;
3130
3131 // Check if we have a base destructor.
3132 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3133 return true;
3134
3135 return false;
3136}
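
For illustration (hypothetical classes, not from this file): the VTT parameter is required exactly for the base-object structor variants of classes that have virtual bases, which is what the checks above select.

struct V { int v; };
struct A : virtual V { };
struct B : A {
  B() {}    // base-object ctor (C2) takes (this, VTT); the complete-object ctor (C1) does not
  ~B() {}   // likewise for the base-object dtor (D2)
};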
3137
3138namespace {
3139class ItaniumRTTIBuilder {
3140 CodeGenModule &CGM; // Per-module state.
3141 llvm::LLVMContext &VMContext;
3142 const ItaniumCXXABI &CXXABI; // Per-module state.
3143
3144 /// Fields - The fields of the RTTI descriptor currently being built.
3145  SmallVector<llvm::Constant *, 16> Fields;
3146
3147 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3148 llvm::GlobalVariable *
3149 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
3150
3151 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3152 /// descriptor of the given type.
3153 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
3154
3155 /// BuildVTablePointer - Build the vtable pointer for the given type.
3156 void BuildVTablePointer(const Type *Ty);
3157
3158 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3159 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3160 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
3161
3162 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3163 /// classes with bases that do not satisfy the abi::__si_class_type_info
3164  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3165 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3166
3167 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3168 /// for pointer types.
3169 void BuildPointerTypeInfo(QualType PointeeTy);
3170
3171 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3172 /// type_info for an object type.
3173 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
3174
3175 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3176 /// struct, used for member pointer types.
3177 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
3178
3179public:
3180 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
3181 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
3182
3183 // Pointer type info flags.
3184 enum {
3185 /// PTI_Const - Type has const qualifier.
3186 PTI_Const = 0x1,
3187
3188 /// PTI_Volatile - Type has volatile qualifier.
3189 PTI_Volatile = 0x2,
3190
3191 /// PTI_Restrict - Type has restrict qualifier.
3192 PTI_Restrict = 0x4,
3193
3194 /// PTI_Incomplete - Type is incomplete.
3195 PTI_Incomplete = 0x8,
3196
3197 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
3198 /// (in pointer to member).
3199 PTI_ContainingClassIncomplete = 0x10,
3200
3201 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
3202 //PTI_TransactionSafe = 0x20,
3203
3204 /// PTI_Noexcept - Pointee is noexcept function (C++1z).
3205 PTI_Noexcept = 0x40,
3206 };
3207
3208 // VMI type info flags.
3209 enum {
3210 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
3211 VMI_NonDiamondRepeat = 0x1,
3212
3213 /// VMI_DiamondShaped - Class is diamond shaped.
3214 VMI_DiamondShaped = 0x2
3215 };
3216
3217 // Base class type info flags.
3218 enum {
3219 /// BCTI_Virtual - Base class is virtual.
3220 BCTI_Virtual = 0x1,
3221
3222 /// BCTI_Public - Base class is public.
3223 BCTI_Public = 0x2
3224 };
3225
3226 /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
3227 /// link to an existing RTTI descriptor if one already exists.
3228 llvm::Constant *BuildTypeInfo(QualType Ty);
3229
3230 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
3231 llvm::Constant *BuildTypeInfo(
3232 QualType Ty,
3233 llvm::GlobalVariable::LinkageTypes Linkage,
3234 llvm::GlobalValue::VisibilityTypes Visibility,
3235 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
3236};
3237}
3238
3239llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3240 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3241 SmallString<256> Name;
3242 llvm::raw_svector_ostream Out(Name);
3243  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3244
3245 // We know that the mangled name of the type starts at index 4 of the
3246 // mangled name of the typename, so we can just index into it in order to
3247 // get the mangled name of the type.
3248 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3249 Name.substr(4));
3250 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3251
3252 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3253 Name, Init->getType(), Linkage, Align.getAsAlign());
3254
3255 GV->setInitializer(Init);
3256
3257 return GV;
3258}
3259
3260llvm::Constant *
3261ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3262 // Mangle the RTTI name.
3263 SmallString<256> Name;
3264 llvm::raw_svector_ostream Out(Name);
3265 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3266
3267 // Look for an existing global.
3268 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3269
3270 if (!GV) {
3271 // Create a new global variable.
3272 // Note for the future: If we would ever like to do deferred emission of
3273    // RTTI, check if emitting vtables opportunistically needs any adjustment.
3274
3275 GV = new llvm::GlobalVariable(
3276 CGM.getModule(), CGM.GlobalsInt8PtrTy,
3277 /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
3278 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3279 CGM.setGVProperties(GV, RD);
3280 // Import the typeinfo symbol when all non-inline virtual methods are
3281 // imported.
3282 if (CGM.getTarget().hasPS4DLLImportExport()) {
3283 if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3284 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3285 CGM.setDSOLocal(GV);
3286 }
3287 }
3288 }
3289
3290 return GV;
3291}
3292
3293/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3294/// info for that type is defined in the standard library.
3295static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3296  // Itanium C++ ABI 2.9.2:
3297 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
3298 // the run-time support library. Specifically, the run-time support
3299 // library should contain type_info objects for the types X, X* and
3300 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3301 // unsigned char, signed char, short, unsigned short, int, unsigned int,
3302 // long, unsigned long, long long, unsigned long long, float, double,
3303 // long double, char16_t, char32_t, and the IEEE 754r decimal and
3304 // half-precision floating point types.
3305 //
3306 // GCC also emits RTTI for __int128.
3307 // FIXME: We do not emit RTTI information for decimal types here.
3308
3309 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3310 switch (Ty->getKind()) {
3311 case BuiltinType::Void:
3312 case BuiltinType::NullPtr:
3313 case BuiltinType::Bool:
3314 case BuiltinType::WChar_S:
3315 case BuiltinType::WChar_U:
3316 case BuiltinType::Char_U:
3317 case BuiltinType::Char_S:
3318 case BuiltinType::UChar:
3319 case BuiltinType::SChar:
3320 case BuiltinType::Short:
3321 case BuiltinType::UShort:
3322 case BuiltinType::Int:
3323 case BuiltinType::UInt:
3324 case BuiltinType::Long:
3325 case BuiltinType::ULong:
3326 case BuiltinType::LongLong:
3327 case BuiltinType::ULongLong:
3328 case BuiltinType::Half:
3329 case BuiltinType::Float:
3330 case BuiltinType::Double:
3331 case BuiltinType::LongDouble:
3332 case BuiltinType::Float16:
3333 case BuiltinType::Float128:
3334 case BuiltinType::Ibm128:
3335 case BuiltinType::Char8:
3336 case BuiltinType::Char16:
3337 case BuiltinType::Char32:
3338 case BuiltinType::Int128:
3339 case BuiltinType::UInt128:
3340 return true;
3341
3342#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3343 case BuiltinType::Id:
3344#include "clang/Basic/OpenCLImageTypes.def"
3345#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3346 case BuiltinType::Id:
3347#include "clang/Basic/OpenCLExtensionTypes.def"
3348 case BuiltinType::OCLSampler:
3349 case BuiltinType::OCLEvent:
3350 case BuiltinType::OCLClkEvent:
3351 case BuiltinType::OCLQueue:
3352 case BuiltinType::OCLReserveID:
3353#define SVE_TYPE(Name, Id, SingletonId) \
3354 case BuiltinType::Id:
3355#include "clang/Basic/AArch64SVEACLETypes.def"
3356#define PPC_VECTOR_TYPE(Name, Id, Size) \
3357 case BuiltinType::Id:
3358#include "clang/Basic/PPCTypes.def"
3359#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3360#include "clang/Basic/RISCVVTypes.def"
3361#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3362#include "clang/Basic/WebAssemblyReferenceTypes.def"
3363 case BuiltinType::ShortAccum:
3364 case BuiltinType::Accum:
3365 case BuiltinType::LongAccum:
3366 case BuiltinType::UShortAccum:
3367 case BuiltinType::UAccum:
3368 case BuiltinType::ULongAccum:
3369 case BuiltinType::ShortFract:
3370 case BuiltinType::Fract:
3371 case BuiltinType::LongFract:
3372 case BuiltinType::UShortFract:
3373 case BuiltinType::UFract:
3374 case BuiltinType::ULongFract:
3375 case BuiltinType::SatShortAccum:
3376 case BuiltinType::SatAccum:
3377 case BuiltinType::SatLongAccum:
3378 case BuiltinType::SatUShortAccum:
3379 case BuiltinType::SatUAccum:
3380 case BuiltinType::SatULongAccum:
3381 case BuiltinType::SatShortFract:
3382 case BuiltinType::SatFract:
3383 case BuiltinType::SatLongFract:
3384 case BuiltinType::SatUShortFract:
3385 case BuiltinType::SatUFract:
3386 case BuiltinType::SatULongFract:
3387 case BuiltinType::BFloat16:
3388 return false;
3389
3390 case BuiltinType::Dependent:
3391#define BUILTIN_TYPE(Id, SingletonId)
3392#define PLACEHOLDER_TYPE(Id, SingletonId) \
3393 case BuiltinType::Id:
3394#include "clang/AST/BuiltinTypes.def"
3395    llvm_unreachable("asking for RTTI for a placeholder type!");
3396
3397 case BuiltinType::ObjCId:
3398 case BuiltinType::ObjCClass:
3399 case BuiltinType::ObjCSel:
3400 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3401 }
3402
3403 llvm_unreachable("Invalid BuiltinType Kind!");
3404}
3405
3406static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3407 QualType PointeeTy = PointerTy->getPointeeType();
3408 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3409 if (!BuiltinTy)
3410 return false;
3411
3412 // Check the qualifiers.
3413 Qualifiers Quals = PointeeTy.getQualifiers();
3414 Quals.removeConst();
3415
3416 if (!Quals.empty())
3417 return false;
3418
3419 return TypeInfoIsInStandardLibrary(BuiltinTy);
3420}
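
As a sketch of what the two predicates above mean in practice (assuming an Itanium C++ runtime such as libc++abi or libstdc++):

#include <typeinfo>
// Descriptors provided by the runtime support library, so not emitted here:
const std::type_info &a = typeid(int);          // _ZTIi
const std::type_info &b = typeid(int *);        // _ZTIPi
const std::type_info &c = typeid(const int *);  // _ZTIPKi
// Not on the standard-library list (the pointee is not a builtin type),
// so its descriptor is emitted in this translation unit:
const std::type_info &d = typeid(int **);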
3421
3422/// IsStandardLibraryRTTIDescriptor - Returns whether the type
3423/// information for the given type exists in the standard library.
3424static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3425  // Type info for builtin types is defined in the standard library.
3426 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3427 return TypeInfoIsInStandardLibrary(BuiltinTy);
3428
3429 // Type info for some pointer types to builtin types is defined in the
3430 // standard library.
3431 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3432 return TypeInfoIsInStandardLibrary(PointerTy);
3433
3434 return false;
3435}
3436
3437/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3438/// the given type exists somewhere else, and that we should not emit the type
3439/// information in this translation unit. Assumes that it is not a
3440/// standard-library type.
3441static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3442                                            QualType Ty) {
3443 ASTContext &Context = CGM.getContext();
3444
3445 // If RTTI is disabled, assume it might be disabled in the
3446 // translation unit that defines any potential key function, too.
3447 if (!Context.getLangOpts().RTTI) return false;
3448
3449 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3450 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3451 if (!RD->hasDefinition())
3452 return false;
3453
3454 if (!RD->isDynamicClass())
3455 return false;
3456
3457 // FIXME: this may need to be reconsidered if the key function
3458 // changes.
3459 // N.B. We must always emit the RTTI data ourselves if there exists a key
3460 // function.
3461 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3462
3463 // Don't import the RTTI but emit it locally.
3464 if (CGM.getTriple().isWindowsGNUEnvironment())
3465 return false;
3466
3467 if (CGM.getVTables().isVTableExternal(RD)) {
3468 if (CGM.getTarget().hasPS4DLLImportExport())
3469 return true;
3470
3471 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3472 ? false
3473 : true;
3474 }
3475 if (IsDLLImport)
3476 return true;
3477 }
3478
3479 return false;
3480}
3481
3482/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3483static bool IsIncompleteClassType(const RecordType *RecordTy) {
3484 return !RecordTy->getDecl()->isCompleteDefinition();
3485}
3486
3487/// ContainsIncompleteClassType - Returns whether the given type contains an
3488/// incomplete class type. This is true if
3489///
3490/// * The given type is an incomplete class type.
3491/// * The given type is a pointer type whose pointee type contains an
3492/// incomplete class type.
3493/// * The given type is a member pointer type whose class is an incomplete
3494/// class type.
3495///   * The given type is a member pointer type whose pointee type contains an
3496/// incomplete class type.
3497/// is an indirect or direct pointer to an incomplete class type.
3498static bool ContainsIncompleteClassType(QualType Ty) {
3499  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3500 if (IsIncompleteClassType(RecordTy))
3501 return true;
3502 }
3503
3504 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3505 return ContainsIncompleteClassType(PointerTy->getPointeeType());
3506
3507 if (const MemberPointerType *MemberPointerTy =
3508 dyn_cast<MemberPointerType>(Ty)) {
3509 // Check if the class type is incomplete.
3510 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3511 if (IsIncompleteClassType(ClassType))
3512 return true;
3513
3514 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3515 }
3516
3517 return false;
3518}
3519
3520// CanUseSingleInheritance - Return whether the given record decl has a "single,
3521// public, non-virtual base at offset zero (i.e. the derived class is dynamic
3522// iff the base is)", according to the Itanium C++ ABI, 2.9.5p6b.
3523static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3524  // Check the number of bases.
3525 if (RD->getNumBases() != 1)
3526 return false;
3527
3528 // Get the base.
3529  const CXXBaseSpecifier *Base = RD->bases_begin();
3530
3531 // Check that the base is not virtual.
3532 if (Base->isVirtual())
3533 return false;
3534
3535 // Check that the base is public.
3536 if (Base->getAccessSpecifier() != AS_public)
3537 return false;
3538
3539 // Check that the class is dynamic iff the base is.
3540 auto *BaseDecl =
3541 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3542 if (!BaseDecl->isEmpty() &&
3543 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3544 return false;
3545
3546 return true;
3547}
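
Illustrative classes (not from this file) showing which __class_type_info flavor the check above selects:

struct B1 { virtual ~B1(); };
struct B2 { virtual ~B2(); };

struct S : B1 { };            // single public non-virtual base at offset zero
                              //   -> abi::__si_class_type_info
struct M : B1, B2 { };        // multiple bases  -> abi::__vmi_class_type_info
struct W : virtual B1 { };    // virtual base    -> abi::__vmi_class_type_info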
3548
3549void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3550 // abi::__class_type_info.
3551 static const char * const ClassTypeInfo =
3552 "_ZTVN10__cxxabiv117__class_type_infoE";
3553 // abi::__si_class_type_info.
3554 static const char * const SIClassTypeInfo =
3555 "_ZTVN10__cxxabiv120__si_class_type_infoE";
3556 // abi::__vmi_class_type_info.
3557 static const char * const VMIClassTypeInfo =
3558 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3559
3560 const char *VTableName = nullptr;
3561
3562 switch (Ty->getTypeClass()) {
3563#define TYPE(Class, Base)
3564#define ABSTRACT_TYPE(Class, Base)
3565#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3566#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3567#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3568#include "clang/AST/TypeNodes.inc"
3569 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3570
3571 case Type::LValueReference:
3572 case Type::RValueReference:
3573 llvm_unreachable("References shouldn't get here");
3574
3575 case Type::Auto:
3576 case Type::DeducedTemplateSpecialization:
3577 llvm_unreachable("Undeduced type shouldn't get here");
3578
3579 case Type::Pipe:
3580 llvm_unreachable("Pipe types shouldn't get here");
3581
3582 case Type::Builtin:
3583 case Type::BitInt:
3584 // GCC treats vector and complex types as fundamental types.
3585 case Type::Vector:
3586 case Type::ExtVector:
3587 case Type::ConstantMatrix:
3588 case Type::Complex:
3589 case Type::Atomic:
3590 // FIXME: GCC treats block pointers as fundamental types?!
3591 case Type::BlockPointer:
3592 // abi::__fundamental_type_info.
3593 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3594 break;
3595
3596 case Type::ConstantArray:
3597 case Type::IncompleteArray:
3598 case Type::VariableArray:
3599 // abi::__array_type_info.
3600 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3601 break;
3602
3603 case Type::FunctionNoProto:
3604 case Type::FunctionProto:
3605 // abi::__function_type_info.
3606 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3607 break;
3608
3609 case Type::Enum:
3610 // abi::__enum_type_info.
3611 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3612 break;
3613
3614 case Type::Record: {
3615 const CXXRecordDecl *RD =
3616 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3617
3618 if (!RD->hasDefinition() || !RD->getNumBases()) {
3619 VTableName = ClassTypeInfo;
3620 } else if (CanUseSingleInheritance(RD)) {
3621 VTableName = SIClassTypeInfo;
3622 } else {
3623 VTableName = VMIClassTypeInfo;
3624 }
3625
3626 break;
3627 }
3628
3629 case Type::ObjCObject:
3630 // Ignore protocol qualifiers.
3631 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3632
3633 // Handle id and Class.
3634 if (isa<BuiltinType>(Ty)) {
3635 VTableName = ClassTypeInfo;
3636 break;
3637 }
3638
3639 assert(isa<ObjCInterfaceType>(Ty));
3640 [[fallthrough]];
3641
3642 case Type::ObjCInterface:
3643 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3644 VTableName = SIClassTypeInfo;
3645 } else {
3646 VTableName = ClassTypeInfo;
3647 }
3648 break;
3649
3650 case Type::ObjCObjectPointer:
3651 case Type::Pointer:
3652 // abi::__pointer_type_info.
3653 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3654 break;
3655
3656 case Type::MemberPointer:
3657 // abi::__pointer_to_member_type_info.
3658 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3659 break;
3660 }
3661
3662 llvm::Constant *VTable = nullptr;
3663
3664 // Check if the alias exists. If it doesn't, then get or create the global.
3666 VTable = CGM.getModule().getNamedAlias(VTableName);
3667 if (!VTable) {
3668 llvm::Type *Ty = llvm::ArrayType::get(CGM.GlobalsInt8PtrTy, 0);
3669 VTable = CGM.getModule().getOrInsertGlobal(VTableName, Ty);
3670 }
3671
3672 CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3673
3674 llvm::Type *PtrDiffTy =
3675      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3676
3677 // The vtable address point is 2.
3678  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3679    // The vtable address point is 8 bytes after its start:
3680 // 4 for the offset to top + 4 for the relative offset to rtti.
3681 llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3682 VTable =
3683 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3684 } else {
3685 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3686 VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.GlobalsInt8PtrTy,
3687 VTable, Two);
3688 }
3689
3690 Fields.push_back(VTable);
3691}
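
A minimal sketch of the address-point arithmetic above: in the classic layout the type_info vtable pointer skips the two pointer-sized header slots (offset-to-top and the RTTI slot), while the relative layout skips the two 4-byte header entries.

#include <cstddef>
// Assuming an LP64 target for the classic case.
constexpr std::size_t ClassicAddressPoint  = 2 * sizeof(void *); // 16 bytes
constexpr std::size_t RelativeAddressPoint = 4 + 4;              // 8 bytes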
3692
3693/// Return the linkage that the type info and type info name constants
3694/// should have for the given type.
3695static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3696 QualType Ty) {
3697 // Itanium C++ ABI 2.9.5p7:
3698 // In addition, it and all of the intermediate abi::__pointer_type_info
3699 // structs in the chain down to the abi::__class_type_info for the
3700 // incomplete class type must be prevented from resolving to the
3701 // corresponding type_info structs for the complete class type, possibly
3702 // by making them local static objects. Finally, a dummy class RTTI is
3703 // generated for the incomplete type that will not resolve to the final
3704 // complete class RTTI (because the latter need not exist), possibly by
3705 // making it a local static object.
3706  if (ContainsIncompleteClassType(Ty))
3707    return llvm::GlobalValue::InternalLinkage;
3708
3709 switch (Ty->getLinkage()) {
3710 case NoLinkage:
3711 case InternalLinkage:
3712  case UniqueExternalLinkage:
3713    return llvm::GlobalValue::InternalLinkage;
3714
3715 case VisibleNoLinkage:
3716 case ModuleLinkage:
3717 case ExternalLinkage:
3718 // RTTI is not enabled, which means that this type info struct is going
3719 // to be used for exception handling. Give it linkonce_odr linkage.
3720 if (!CGM.getLangOpts().RTTI)
3721 return llvm::GlobalValue::LinkOnceODRLinkage;
3722
3723 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3724 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3725 if (RD->hasAttr<WeakAttr>())
3726 return llvm::GlobalValue::WeakODRLinkage;
3727 if (CGM.getTriple().isWindowsItaniumEnvironment())
3728 if (RD->hasAttr<DLLImportAttr>() &&
3729            ShouldUseExternalRTTIDescriptor(CGM, Ty))
3730          return llvm::GlobalValue::ExternalLinkage;
3731 // MinGW always uses LinkOnceODRLinkage for type info.
3732 if (RD->isDynamicClass() &&
3733 !CGM.getContext()
3734 .getTargetInfo()
3735 .getTriple()
3736 .isWindowsGNUEnvironment())
3737 return CGM.getVTableLinkage(RD);
3738 }
3739
3740 return llvm::GlobalValue::LinkOnceODRLinkage;
3741 }
3742
3743 llvm_unreachable("Invalid linkage!");
3744}
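
A short example of the incomplete-type rule quoted above (hypothetical class name): the RTTI emitted for a pointer to an incomplete class gets internal linkage so it can never resolve against the descriptor for the later-completed type.

#include <typeinfo>
struct Incomplete;  // never defined in this TU
// The __pointer_type_info (and the dummy __class_type_info for Incomplete)
// emitted for this expression are internal to this translation unit.
const std::type_info &ti = typeid(Incomplete *);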
3745
3746llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3747 // We want to operate on the canonical type.
3748 Ty = Ty.getCanonicalType();
3749
3750 // Check if we've already emitted an RTTI descriptor for this type.
3751 SmallString<256> Name;
3752 llvm::raw_svector_ostream Out(Name);
3753 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3754
3755 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3756 if (OldGV && !OldGV->isDeclaration()) {
3757 assert(!OldGV->hasAvailableExternallyLinkage() &&
3758 "available_externally typeinfos not yet implemented");
3759
3760 return OldGV;
3761 }
3762
3763 // Check if there is already an external RTTI descriptor for this type.
3764  if (IsStandardLibraryRTTIDescriptor(Ty) ||
3765      ShouldUseExternalRTTIDescriptor(CGM, Ty))
3766    return GetAddrOfExternalRTTIDescriptor(Ty);
3767
3768  // Determine the linkage with which to emit this type information.
3769 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3770
3771 // Give the type_info object and name the formal visibility of the
3772 // type itself.
3773 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3774 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3775 // If the linkage is local, only default visibility makes sense.
3776 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3777 else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3778 ItaniumCXXABI::RUK_NonUniqueHidden)
3779 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3780 else
3781 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3782
3783 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3784 llvm::GlobalValue::DefaultStorageClass;
3785 if (auto RD = Ty->getAsCXXRecordDecl()) {
3786 if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
3787 RD->hasAttr<DLLExportAttr>()) ||
3788        (CGM.shouldMapVisibilityToDLLExport(RD) &&
3789         !llvm::GlobalValue::isLocalLinkage(Linkage) &&
3790 llvmVisibility == llvm::GlobalValue::DefaultVisibility))
3791 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3792 }
3793 return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3794}
3795
3796llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3797 QualType Ty,
3798 llvm::GlobalVariable::LinkageTypes Linkage,
3799 llvm::GlobalValue::VisibilityTypes Visibility,
3800 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3801 // Add the vtable pointer.
3802 BuildVTablePointer(cast<Type>(Ty));
3803
3804 // And the name.
3805 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3806 llvm::Constant *TypeNameField;
3807
3808 // If we're supposed to demote the visibility, be sure to set a flag
3809 // to use a string comparison for type_info comparisons.
3810 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3811 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3812 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3813 // The flag is the sign bit, which on ARM64 is defined to be clear
3814 // for global pointers. This is very ARM64-specific.
3815 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3816 llvm::Constant *flag =
3817 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3818 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3819 TypeNameField =
3820 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.GlobalsInt8PtrTy);
3821 } else {
3822 TypeNameField = TypeName;
3823 }
3824 Fields.push_back(TypeNameField);
3825
3826 switch (Ty->getTypeClass()) {
3827#define TYPE(Class, Base)
3828#define ABSTRACT_TYPE(Class, Base)
3829#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3830#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3831#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3832#include "clang/AST/TypeNodes.inc"
3833 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3834
3835 // GCC treats vector types as fundamental types.
3836 case Type::Builtin:
3837 case Type::Vector:
3838 case Type::ExtVector:
3839 case Type::ConstantMatrix:
3840 case Type::Complex:
3841 case Type::BlockPointer:
3842 // Itanium C++ ABI 2.9.5p4:
3843 // abi::__fundamental_type_info adds no data members to std::type_info.
3844 break;
3845
3846 case Type::LValueReference:
3847 case Type::RValueReference:
3848 llvm_unreachable("References shouldn't get here");
3849
3850 case Type::Auto:
3851 case Type::DeducedTemplateSpecialization:
3852 llvm_unreachable("Undeduced type shouldn't get here");
3853
3854 case Type::Pipe:
3855 break;
3856
3857 case Type::BitInt:
3858 break;
3859
3860 case Type::ConstantArray:
3861 case Type::IncompleteArray:
3862 case Type::VariableArray:
3863 // Itanium C++ ABI 2.9.5p5:
3864 // abi::__array_type_info adds no data members to std::type_info.
3865 break;
3866
3867 case Type::FunctionNoProto:
3868 case Type::FunctionProto:
3869 // Itanium C++ ABI 2.9.5p5:
3870 // abi::__function_type_info adds no data members to std::type_info.
3871 break;
3872
3873 case Type::Enum:
3874 // Itanium C++ ABI 2.9.5p5:
3875 // abi::__enum_type_info adds no data members to std::type_info.
3876 break;
3877
3878 case Type::Record: {
3879 const CXXRecordDecl *RD =
3880 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3881 if (!RD->hasDefinition() || !RD->getNumBases()) {
3882 // We don't need to emit any fields.
3883 break;
3884 }
3885
3886    if (CanUseSingleInheritance(RD))
3887      BuildSIClassTypeInfo(RD);
3888 else
3889 BuildVMIClassTypeInfo(RD);
3890
3891 break;
3892 }
3893
3894 case Type::ObjCObject:
3895 case Type::ObjCInterface:
3896 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3897 break;
3898
3899 case Type::ObjCObjectPointer:
3900 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3901 break;
3902
3903 case Type::Pointer:
3904 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3905 break;
3906
3907 case Type::MemberPointer:
3908 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3909 break;
3910
3911 case Type::Atomic:
3912 // No fields, at least for the moment.
3913 break;
3914 }
3915
3916 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3917
3918 SmallString<256> Name;
3919 llvm::raw_svector_ostream Out(Name);
3920 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3921 llvm::Module &M = CGM.getModule();
3922 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3923 llvm::GlobalVariable *GV =
3924 new llvm::GlobalVariable(M, Init->getType(),
3925 /*isConstant=*/true, Linkage, Init, Name);
3926
3927 // Export the typeinfo in the same circumstances as the vtable is exported.
3928 auto GVDLLStorageClass = DLLStorageClass;
3929 if (CGM.getTarget().hasPS4DLLImportExport()) {
3930 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3931 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3932 if (RD->hasAttr<DLLExportAttr>() ||
3933 CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
3934 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
3935 }
3936 }
3937 }
3938
3939 // If there's already an old global variable, replace it with the new one.
3940 if (OldGV) {
3941 GV->takeName(OldGV);
3942 llvm::Constant *NewPtr =
3943 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3944 OldGV->replaceAllUsesWith(NewPtr);
3945 OldGV->eraseFromParent();
3946 }
3947
3948 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3949 GV->setComdat(M.getOrInsertComdat(GV->getName()));
3950
3953 GV->setAlignment(Align.getAsAlign());
3954
3955 // The Itanium ABI specifies that type_info objects must be globally
3956 // unique, with one exception: if the type is an incomplete class
3957 // type or a (possibly indirect) pointer to one. That exception
3958 // affects the general case of comparing type_info objects produced
3959 // by the typeid operator, which is why the comparison operators on
3960 // std::type_info generally use the type_info name pointers instead
3961 // of the object addresses. However, the language's built-in uses
3962 // of RTTI generally require class types to be complete, even when
3963 // manipulating pointers to those class types. This allows the
3964 // implementation of dynamic_cast to rely on address equality tests,
3965 // which is much faster.
3966
3967 // All of this is to say that it's important that both the type_info
3968 // object and the type_info name be uniqued when weakly emitted.
3969
3970 TypeName->setVisibility(Visibility);
3971 CGM.setDSOLocal(TypeName);
3972
3973 GV->setVisibility(Visibility);
3974 CGM.setDSOLocal(GV);
3975
3976 TypeName->setDLLStorageClass(DLLStorageClass);
3977 GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
3978 ? GVDLLStorageClass
3979 : DLLStorageClass);
3980
3981 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3982 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3983
3984 return GV;
3985}
3986
3987/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3988/// for the given Objective-C object type.
3989void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3990 // Drop qualifiers.
3991 const Type *T = OT->getBaseType().getTypePtr();
3992 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3993
3994 // The builtin types are abi::__class_type_infos and don't require
3995 // extra fields.
3996 if (isa<BuiltinType>(T)) return;
3997
3998 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3999 ObjCInterfaceDecl *Super = Class->getSuperClass();
4000
4001 // Root classes are also __class_type_info.
4002 if (!Super) return;
4003
4004 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
4005
4006 // Everything else is single inheritance.
4007 llvm::Constant *BaseTypeInfo =
4008 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
4009 Fields.push_back(BaseTypeInfo);
4010}
4011
4012/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
4013/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
4014void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
4015 // Itanium C++ ABI 2.9.5p6b:
4016 // It adds to abi::__class_type_info a single member pointing to the
4017 // type_info structure for the base type,
4018 llvm::Constant *BaseTypeInfo =
4019 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
4020 Fields.push_back(BaseTypeInfo);
4021}
4022
4023namespace {
4024 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
4025 /// a class hierarchy.
4026 struct SeenBases {
4027    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
4028    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
4029  };
4030}
4031
4032/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
4033/// abi::__vmi_class_type_info.
4034///
4035static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
4036                                             SeenBases &Bases) {
4037
4038 unsigned Flags = 0;
4039
4040 auto *BaseDecl =
4041 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
4042
4043 if (Base->isVirtual()) {
4044 // Mark the virtual base as seen.
4045 if (!Bases.VirtualBases.insert(BaseDecl).second) {
4046 // If this virtual base has been seen before, then the class is diamond
4047 // shaped.
4048 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
4049 } else {
4050 if (Bases.NonVirtualBases.count(BaseDecl))
4051 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4052 }
4053 } else {
4054 // Mark the non-virtual base as seen.
4055 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
4056 // If this non-virtual base has been seen before, then the class has non-
4057 // diamond shaped repeated inheritance.
4058 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4059 } else {
4060 if (Bases.VirtualBases.count(BaseDecl))
4061 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4062 }
4063 }
4064
4065 // Walk all bases.
4066 for (const auto &I : BaseDecl->bases())
4067 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4068
4069 return Flags;
4070}
4071
4072static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
4073  unsigned Flags = 0;
4074 SeenBases Bases;
4075
4076 // Walk all bases.
4077 for (const auto &I : RD->bases())
4078 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4079
4080 return Flags;
4081}
4082
4083/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4084/// classes with bases that do not satisfy the abi::__si_class_type_info
4085/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
4086void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4087 llvm::Type *UnsignedIntLTy =
4088    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4089
4090 // Itanium C++ ABI 2.9.5p6c:
4091 // __flags is a word with flags describing details about the class
4092 // structure, which may be referenced by using the __flags_masks
4093 // enumeration. These flags refer to both direct and indirect bases.
4094 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4095 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4096
4097 // Itanium C++ ABI 2.9.5p6c:
4098 // __base_count is a word with the number of direct proper base class
4099 // descriptions that follow.
4100 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4101
4102 if (!RD->getNumBases())
4103 return;
4104
4105 // Now add the base class descriptions.
4106
4107 // Itanium C++ ABI 2.9.5p6c:
4108 // __base_info[] is an array of base class descriptions -- one for every
4109 // direct proper base. Each description is of the type:
4110 //
4111 // struct abi::__base_class_type_info {
4112 // public:
4113 // const __class_type_info *__base_type;
4114 // long __offset_flags;
4115 //
4116 // enum __offset_flags_masks {
4117 // __virtual_mask = 0x1,
4118 // __public_mask = 0x2,
4119 // __offset_shift = 8
4120 // };
4121 // };
4122
4123 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4124 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4125 // LLP64 platforms.
4126 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4127 // LLP64 platforms.
4128 QualType OffsetFlagsTy = CGM.getContext().LongTy;
4129 const TargetInfo &TI = CGM.getContext().getTargetInfo();
4130 if (TI.getTriple().isOSCygMing() &&
4131      TI.getPointerWidth(LangAS::Default) > TI.getLongWidth())
4132    OffsetFlagsTy = CGM.getContext().LongLongTy;
4133 llvm::Type *OffsetFlagsLTy =
4134 CGM.getTypes().ConvertType(OffsetFlagsTy);
4135
4136 for (const auto &Base : RD->bases()) {
4137 // The __base_type member points to the RTTI for the base type.
4138 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4139
4140 auto *BaseDecl =
4141 cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4142
4143 int64_t OffsetFlags = 0;
4144
4145 // All but the lower 8 bits of __offset_flags are a signed offset.
4146 // For a non-virtual base, this is the offset in the object of the base
4147 // subobject. For a virtual base, this is the offset in the virtual table of
4148 // the virtual base offset for the virtual base referenced (negative).
4149 CharUnits Offset;
4150 if (Base.isVirtual())
4151 Offset =
4152          CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4153    else {
4154 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4155 Offset = Layout.getBaseClassOffset(BaseDecl);
4156 };
4157
4158 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4159
4160 // The low-order byte of __offset_flags contains flags, as given by the
4161 // masks from the enumeration __offset_flags_masks.
4162 if (Base.isVirtual())
4163 OffsetFlags |= BCTI_Virtual;
4164 if (Base.getAccessSpecifier() == AS_public)
4165 OffsetFlags |= BCTI_Public;
4166
4167 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4168 }
4169}
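
A worked example of the __offset_flags encoding built above (values follow the BCTI_* masks and the 8-bit shift; the base offset of 8 is hypothetical):

// Public, non-virtual base at byte offset 8 in the object:
//   __offset_flags = (8 << __offset_shift) | __public_mask = (8 << 8) | 0x2
constexpr long long PublicNonVirtualAt8 = (8LL << 8) | 0x2;   // 0x802
// For a public virtual base, the shifted value is instead the (negative)
// position of its virtual-base-offset slot in the vtable, and __virtual_mask
// (0x1) is set in addition to __public_mask.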
4170
4171/// Compute the flags for a __pbase_type_info, and remove the corresponding
4172/// pieces from \p Type.
4173static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4174  unsigned Flags = 0;
4175
4176 if (Type.isConstQualified())
4177 Flags |= ItaniumRTTIBuilder::PTI_Const;
4178 if (Type.isVolatileQualified())
4179 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4180 if (Type.isRestrictQualified())
4181 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4182 Type = Type.getUnqualifiedType();
4183
4184 // Itanium C++ ABI 2.9.5p7:
4185 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4186 // incomplete class type, the incomplete target type flag is set.
4187  if (ContainsIncompleteClassType(Type))
4188    Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4189
4190 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4191 if (Proto->isNothrow()) {
4192 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4193      Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4194    }
4195 }
4196
4197 return Flags;
4198}
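
Illustrative __flags values the helper above produces for a few pointee types (using the PTI_* masks defined earlier):

// Pointee type            __flags
// ------------            -------
// const volatile int      PTI_Const | PTI_Volatile = 0x3
// void () noexcept        PTI_Noexcept             = 0x40
// struct Incomplete       PTI_Incomplete           = 0x8
constexpr unsigned ConstVolatilePointeeFlags = 0x1 | 0x2;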
4199
4200/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4201/// used for pointer types.
4202void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4203 // Itanium C++ ABI 2.9.5p7:
4204 // __flags is a flag word describing the cv-qualification and other
4205 // attributes of the type pointed to
4206 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4207
4208 llvm::Type *UnsignedIntLTy =
4209      CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4210  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4211
4212 // Itanium C++ ABI 2.9.5p7:
4213 // __pointee is a pointer to the std::type_info derivation for the
4214 // unqualified type being pointed to.
4215 llvm::Constant *PointeeTypeInfo =
4216 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4217 Fields.push_back(PointeeTypeInfo);
4218}
4219
4220/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4221/// struct, used for member pointer types.
4222void
4223ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4224 QualType PointeeTy = Ty->getPointeeType();
4225
4226 // Itanium C++ ABI 2.9.5p7:
4227 // __flags is a flag word describing the cv-qualification and other
4228 // attributes of the type pointed to.
4229 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4230
4231 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4232 if (IsIncompleteClassType(ClassType))
4233 Flags |= PTI_ContainingClassIncomplete;
4234
4235 llvm::Type *UnsignedIntLTy =
4236      CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4237  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4238
4239 // Itanium C++ ABI 2.9.5p7:
4240 // __pointee is a pointer to the std::type_info derivation for the
4241 // unqualified type being pointed to.
4242 llvm::Constant *PointeeTypeInfo =
4243 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4244 Fields.push_back(PointeeTypeInfo);
4245
4246 // Itanium C++ ABI 2.9.5p9:
4247 // __context is a pointer to an abi::__class_type_info corresponding to the
4248 // class type containing the member pointed to
4249 // (e.g., the "A" in "int A::*").
4250 Fields.push_back(
4251 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4252}
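
For illustration (hypothetical class A): the descriptor for a pointer-to-member type carries the three extra fields built above.

struct A;                     // only forward-declared in this TU
using MemberPtr = int A::*;
// The __pointer_to_member_type_info for MemberPtr has:
//   __flags   including PTI_ContainingClassIncomplete (0x10), since A is
//             incomplete in this translation unit
//   __pointee pointing at the type_info for int
//   __context pointing at the __class_type_info for A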
4253
4254llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4255 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4256}
4257
4258void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4259 // Types added here must also be added to TypeInfoIsInStandardLibrary.
4260 QualType FundamentalTypes[] = {
4261 getContext().VoidTy, getContext().NullPtrTy,
4262 getContext().BoolTy, getContext().WCharTy,
4263 getContext().CharTy, getContext().UnsignedCharTy,
4264 getContext().SignedCharTy, getContext().ShortTy,
4265 getContext().UnsignedShortTy, getContext().IntTy,
4266 getContext().UnsignedIntTy, getContext().LongTy,
4267 getContext().UnsignedLongTy, getContext().LongLongTy,
4268 getContext().UnsignedLongLongTy, getContext().Int128Ty,
4269 getContext().UnsignedInt128Ty, getContext().HalfTy,
4270 getContext().FloatTy, getContext().DoubleTy,
4271 getContext().LongDoubleTy, getContext().Float128Ty,
4272 getContext().Char8Ty, getContext().Char16Ty,
4273 getContext().Char32Ty
4274 };
4275 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4276 RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
4277 ? llvm::GlobalValue::DLLExportStorageClass
4278 : llvm::GlobalValue::DefaultStorageClass;
4279 llvm::GlobalValue::VisibilityTypes Visibility =
4280      CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4281  for (const QualType &FundamentalType : FundamentalTypes) {
4282 QualType PointerType = getContext().getPointerType(FundamentalType);
4283 QualType PointerTypeConst = getContext().getPointerType(
4284 FundamentalType.withConst());
4285 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4286 ItaniumRTTIBuilder(*this).BuildTypeInfo(
4287 Type, llvm::GlobalValue::ExternalLinkage,
4288 Visibility, DLLStorageClass);
4289 }
4290}
4291
4292/// What sort of uniqueness rules should we use for the RTTI for the
4293/// given type?
4294ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4295 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4296 if (shouldRTTIBeUnique())
4297 return RUK_Unique;
4298
4299 // It's only necessary for linkonce_odr or weak_odr linkage.
4300 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4301 Linkage != llvm::GlobalValue::WeakODRLinkage)
4302 return RUK_Unique;
4303
4304 // It's only necessary with default visibility.
4305 if (CanTy->getVisibility() != DefaultVisibility)
4306 return RUK_Unique;
4307
4308 // If we're not required to publish this symbol, hide it.
4309 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4310 return RUK_NonUniqueHidden;
4311
4312 // If we're required to publish this symbol, as we might be under an
4313 // explicit instantiation, leave it with default visibility but
4314 // enable string-comparisons.
4315 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4316 return RUK_NonUniqueVisible;
4317}
4318
4319// Find out how to codegen the complete destructor and constructor
4320namespace {
4321enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4322}
4323static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4324 const CXXMethodDecl *MD) {
4325 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4326 return StructorCodegen::Emit;
4327
4328 // The complete and base structors are not equivalent if there are any virtual
4329 // bases, so emit separate functions.
4330 if (MD->getParent()->getNumVBases())
4331 return StructorCodegen::Emit;
4332
4333  GlobalDecl AliasDecl;
4334  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4335    AliasDecl = GlobalDecl(DD, Dtor_Complete);
4336  } else {
4337    const auto *CD = cast<CXXConstructorDecl>(MD);
4338    AliasDecl = GlobalDecl(CD, Ctor_Complete);
4339  }
4340 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4341
4342 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4343 return StructorCodegen::RAUW;
4344
4345 // FIXME: Should we allow available_externally aliases?
4346 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4347 return StructorCodegen::RAUW;
4348
4349 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4350 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4351 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4352 CGM.getTarget().getTriple().isOSBinFormatWasm())
4353 return StructorCodegen::COMDAT;
4354 return StructorCodegen::Emit;
4355 }
4356
4357 return StructorCodegen::Alias;
4358}
4359
4360static void emitConstructorDestructorAlias(CodeGenModule &CGM,
4361                                           GlobalDecl AliasDecl,
4362                                           GlobalDecl TargetDecl) {
4363 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4364
4365 StringRef MangledName = CGM.getMangledName(AliasDecl);
4366 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
4367 if (Entry && !Entry->isDeclaration())
4368 return;
4369
4370 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4371
4372 // Create the alias with no name.
4373 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4374
4375 // Constructors and destructors are always unnamed_addr.
4376 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4377
4378 // Switch any previous uses to the alias.
4379 if (Entry) {
4380 assert(Entry->getType() == Aliasee->getType() &&
4381 "declaration exists with different type");
4382 Alias->takeName(Entry);
4383 Entry->replaceAllUsesWith(Alias);
4384 Entry->eraseFromParent();
4385 } else {
4386 Alias->setName(MangledName);
4387 }
4388
4389 // Finally, set up the alias with its proper name and attributes.
4390 CGM.SetCommonAttributes(AliasDecl, Alias);
4391}
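
Sketch of the aliasing this helper enables (assuming -mconstructor-aliases on an ELF target and a hypothetical class): when the complete and base variants are identical, the complete-object symbol becomes an alias of the base-object definition instead of a duplicate body.

struct NoVBases {
  NoVBases();
};
// _ZN8NoVBasesC1Ev (complete-object) is emitted as an alias of
// _ZN8NoVBasesC2Ev (base-object); with no virtual bases the two
// constructor variants are interchangeable.
NoVBases::NoVBases() {}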
4392
4393void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4394 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4395 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4396 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4397
4398 StructorCodegen CGType = getCodegenToUse(CGM, MD);
4399
4400 if (CD ? GD.getCtorType() == Ctor_Complete
4401 : GD.getDtorType() == Dtor_Complete) {
4402 GlobalDecl BaseDecl;
4403 if (CD)
4404 BaseDecl = GD.getWithCtorType(Ctor_Base);
4405 else
4406 BaseDecl = GD.getWithDtorType(Dtor_Base);
4407
4408 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4409 emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4410 return;
4411 }
4412
4413 if (CGType == StructorCodegen::RAUW) {
4414 StringRef MangledName = CGM.getMangledName(GD);
4415 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4416 CGM.addReplacement(MangledName, Aliasee);
4417 return;
4418 }
4419 }
4420
4421 // The base destructor is equivalent to the base destructor of its
4422 // base class if there is exactly one non-virtual base class with a
4423 // non-trivial destructor, there are no fields with a non-trivial
4424 // destructor, and the body of the destructor is trivial.
4425 if (DD && GD.getDtorType() == Dtor_Base &&
4426 CGType != StructorCodegen::COMDAT &&
4427      !CGM.TryEmitBaseDestructorAsAlias(DD))
4428    return;
4429
4430 // FIXME: The deleting destructor is equivalent to the selected operator
4431 // delete if:
4432 // * either the delete is a destroying operator delete or the destructor
4433 // would be trivial if it weren't virtual,
4434 // * the conversion from the 'this' parameter to the first parameter of the
4435 // destructor is equivalent to a bitcast,
4436 // * the destructor does not have an implicit "this" return, and
4437 // * the operator delete has the same calling convention and IR function type
4438 // as the destructor.
4439 // In such cases we should try to emit the deleting dtor as an alias to the
4440 // selected 'operator delete'.
4441
4442 llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4443
4444 if (CGType == StructorCodegen::COMDAT) {
4445 SmallString<256> Buffer;
4446 llvm::raw_svector_ostream Out(Buffer);
4447 if (DD)
4448 getMangleContext().mangleCXXDtorComdat(DD, Out);
4449 else
4450 getMangleContext().mangleCXXCtorComdat(CD, Out);
4451 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4452 Fn->setComdat(C);
4453 } else {
4454 CGM.maybeSetTrivialComdat(*MD, *Fn);
4455 }
4456}
4457
4458static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4459 // void *__cxa_begin_catch(void*);
4460 llvm::FunctionType *FTy = llvm::FunctionType::get(
4461 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4462
4463 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4464}
4465
4466static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4467 // void __cxa_end_catch();
4468 llvm::FunctionType *FTy =
4469 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4470
4471 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4472}
4473
4474static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4475 // void *__cxa_get_exception_ptr(void*);
4476 llvm::FunctionType *FTy = llvm::FunctionType::get(
4477 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4478
4479 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4480}
4481
4482namespace {
4483 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4484 /// exception type lets us state definitively that the thrown exception
4485 /// type does not have a destructor. In particular:
4486 /// - Catch-alls tell us nothing, so we have to conservatively
4487 /// assume that the thrown exception might have a destructor.
4488 /// - Catches by reference behave according to their base types.
4489 /// - Catches of non-record types will only trigger for exceptions
4490 /// of non-record types, which never have destructors.
4491 /// - Catches of record types can trigger for arbitrary subclasses
4492 /// of the caught type, so we have to assume the actual thrown
4493 /// exception type might have a throwing destructor, even if the
4494 /// caught type's destructor is trivial or nothrow.
4495 struct CallEndCatch final : EHScopeStack::Cleanup {
4496 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4497 bool MightThrow;
4498
4499 void Emit(CodeGenFunction &CGF, Flags flags) override {
4500 if (!MightThrow) {
4501        CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4502        return;
4503 }
4504
4505      CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4506    }
4507 };
4508}
4509
4510/// Emits a call to __cxa_begin_catch and enters a cleanup to call
4511/// __cxa_end_catch.
4512///
4513/// \param EndMightThrow - true if __cxa_end_catch might throw
4514static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4515 llvm::Value *Exn,
4516 bool EndMightThrow) {
4517 llvm::CallInst *call =
4518    CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4519
4520 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4521
4522 return call;
4523}
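
Roughly, the pair of helpers above turns a catch handler into the following runtime-call pattern (a sketch; the declarations match the signatures used by getBeginCatchFn and getEndCatchFn above):

extern "C" void *__cxa_begin_catch(void *exceptionObject);
extern "C" void __cxa_end_catch();

// void handler(void *exn) {
//   void *adjusted = __cxa_begin_catch(exn);  // CallBeginCatch
//   ... use the caught object via 'adjusted' ...
//   __cxa_end_catch();                        // CallEndCatch cleanup, also
// }                                           // runs if the body unwinds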
4524
4525/// A "special initializer" callback for initializing a catch
4526/// parameter during catch initialization.
4527static void InitCatchParam(CodeGenFunction &CGF,
4528                           const VarDecl &CatchParam,
4529 Address ParamAddr,
4530 SourceLocation Loc) {
4531 // Load the exception from where the landing pad saved it.
4532 llvm::Value *Exn = CGF.getExceptionFromSlot();
4533
4534 CanQualType CatchType =
4535 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4536 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4537
4538 // If we're catching by reference, we can just cast the object
4539 // pointer to the appropriate pointer.
4540 if (isa<ReferenceType>(CatchType)) {
4541 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4542 bool EndCatchMightThrow = CaughtType->isRecordType();
4543
4544 // __cxa_begin_catch returns the adjusted object pointer.
4545 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4546
4547 // We have no way to tell the personality function that we're
4548 // catching by reference, so if we're catching a pointer,
4549 // __cxa_begin_catch will actually return that pointer by value.
4550 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4551 QualType PointeeType = PT->getPointeeType();
4552
4553 // When catching by reference, generally we should just ignore
4554 // this by-value pointer and use the exception object instead.
4555 if (!PointeeType->isRecordType()) {
4556
4557 // Exn points to the struct _Unwind_Exception header, which
4558 // we have to skip past in order to reach the exception data.
4559 unsigned HeaderSize =
4560 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4561 AdjustedExn =
4562 CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
4563
4564 // However, if we're catching a pointer-to-record type that won't
4565 // work, because the personality function might have adjusted
4566 // the pointer. There's actually no way for us to fully satisfy
4567 // the language/ABI contract here: we can't use Exn because it
4568 // might have the wrong adjustment, but we can't use the by-value
4569 // pointer because it's off by a level of abstraction.
4570 //
4571 // The current solution is to dump the adjusted pointer into an
4572 // alloca, which breaks language semantics (because changing the
4573 // pointer doesn't change the exception) but at least works.
4574 // The better solution would be to filter out non-exact matches
4575 // and rethrow them, but this is tricky because the rethrow
4576 // really needs to be catchable by other sites at this landing
4577 // pad. The best solution is to fix the personality function.
4578 } else {
4579 // Pull the pointer for the reference type off.
4580 llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);
4581
4582 // Create the temporary and write the adjusted pointer into it.
4583 Address ExnPtrTmp =
4584 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4585 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4586 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4587
4588 // Bind the reference to the temporary.
4589 AdjustedExn = ExnPtrTmp.getPointer();
4590 }
4591 }
4592
4593 llvm::Value *ExnCast =
4594 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4595 CGF.Builder.CreateStore(ExnCast, ParamAddr);
4596 return;
4597 }
4598
4599 // Scalars and complexes.
4600 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4601 if (TEK != TEK_Aggregate) {
4602 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4603
4604 // If the catch type is a pointer type, __cxa_begin_catch returns
4605 // the pointer by value.
4606 if (CatchType->hasPointerRepresentation()) {
4607 llvm::Value *CastExn =
4608 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4609
4610 switch (CatchType.getQualifiers().getObjCLifetime()) {
4611 case Qualifiers::OCL_Strong:
4612 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4613 [[fallthrough]];
4614
4615 case Qualifiers::OCL_None:
4616 case Qualifiers::OCL_ExplicitNone:
4617 case Qualifiers::OCL_Autoreleasing:
4618 CGF.Builder.CreateStore(CastExn, ParamAddr);
4619 return;
4620
4621 case Qualifiers::OCL_Weak:
4622 CGF.EmitARCInitWeak(ParamAddr, CastExn);
4623 return;
4624 }
4625 llvm_unreachable("bad ownership qualifier!");
4626 }
4627
4628 // Otherwise, it returns a pointer into the exception object.
4629
4630 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(AdjustedExn, CatchType);
4631 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4632 switch (TEK) {
4633 case TEK_Complex:
4634 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4635 /*init*/ true);
4636 return;
4637 case TEK_Scalar: {
4638 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4639 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4640 return;
4641 }
4642 case TEK_Aggregate:
4643 llvm_unreachable("evaluation kind filtered out!");
4644 }
4645 llvm_unreachable("bad evaluation kind");
4646 }
4647
4648 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4649 auto catchRD = CatchType->getAsCXXRecordDecl();
4650 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4651
4652 llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
4653
4654 // Check for a copy expression. If we don't have a copy expression,
4655 // that means a trivial copy is okay.
4656 const Expr *copyExpr = CatchParam.getInit();
4657 if (!copyExpr) {
4658 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4659 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4660 LLVMCatchTy, caughtExnAlignment);
4661 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4662 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4663 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4664 return;
4665 }
4666
4667 // We have to call __cxa_get_exception_ptr to get the adjusted
4668 // pointer before copying.
4669 llvm::CallInst *rawAdjustedExn =
4670 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4671
4672 // Cast that to the appropriate type.
4673 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4674 LLVMCatchTy, caughtExnAlignment);
4675
4676 // The copy expression is defined in terms of an OpaqueValueExpr.
4677 // Find it and map it to the adjusted expression.
4678 CodeGenFunction::OpaqueValueMapping
4679 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4680 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4681
4682 // Call the copy ctor in a terminate scope.
4683 CGF.EHStack.pushTerminate();
4684
4685 // Perform the copy construction.
4686 CGF.EmitAggExpr(copyExpr,
4687 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4688 AggValueSlot::IsNotDestructed,
4689 AggValueSlot::DoesNotNeedGCBarriers,
4690 AggValueSlot::IsNotAliased,
4691 AggValueSlot::DoesNotOverlap));
4692
4693 // Leave the terminate scope.
4694 CGF.EHStack.popTerminate();
4695
4696 // Undo the opaque value mapping.
4697 opaque.pop();
4698
4699 // Finally we can call __cxa_begin_catch.
4700 CallBeginCatch(CGF, Exn, true);
4701}
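The catch forms InitCatchParam distinguishes can be summarized with a hypothetical snippet (type and function names are illustrative, not from this file); the pointer-caught-by-reference case is the one bound through a temporary slot as described in the comments above:

struct S { int x; };
struct T { T() {} T(const T &) {} };  // non-trivial copy constructor

void catchForms() {
  try { throw 42; }       catch (int &r) { (void)r; }    // reference to non-record
  try { throw S{1}; }     catch (S &r)   { (void)r; }    // reference to record
  try { throw new S{2}; } catch (S *&pr) { delete pr; }  // reference to pointer: bound
                                                         //   through a temporary slot
  try { throw S{3}; }     catch (S c)    { (void)c; }    // trivial copy: aggregate copy
  try { throw T(); }      catch (T c)    { (void)c; }    // non-trivial copy: constructed
                                                         //   from __cxa_get_exception_ptr
}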
4702
4703/// Begins a catch statement by initializing the catch variable and
4704/// calling __cxa_begin_catch.
4705void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4706 const CXXCatchStmt *S) {
4707 // We have to be very careful with the ordering of cleanups here:
4708 // C++ [except.throw]p4:
4709 // The destruction [of the exception temporary] occurs
4710 // immediately after the destruction of the object declared in
4711 // the exception-declaration in the handler.
4712 //
4713 // So the precise ordering is:
4714 // 1. Construct catch variable.
4715 // 2. __cxa_begin_catch
4716 // 3. Enter __cxa_end_catch cleanup
4717 // 4. Enter dtor cleanup
4718 //
4719 // We do this by using a slightly abnormal initialization process.
4720 // Delegation sequence:
4721 // - ExitCXXTryStmt opens a RunCleanupsScope
4722 // - EmitAutoVarAlloca creates the variable and debug info
4723 // - InitCatchParam initializes the variable from the exception
4724 // - CallBeginCatch calls __cxa_begin_catch
4725 // - CallBeginCatch enters the __cxa_end_catch cleanup
4726 // - EmitAutoVarCleanups enters the variable destructor cleanup
4727 // - EmitCXXTryStmt emits the code for the catch body
4728 // - EmitCXXTryStmt closes the RunCleanupsScope
4729
4730 VarDecl *CatchParam = S->getExceptionDecl();
4731 if (!CatchParam) {
4732 llvm::Value *Exn = CGF.getExceptionFromSlot();
4733 CallBeginCatch(CGF, Exn, true);
4734 return;
4735 }
4736
4737 // Emit the local.
4738 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4739 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4740 CGF.EmitAutoVarCleanups(var);
4741}
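A hypothetical example of the ordering spelled out above: the handler variable's destructor cleanup is entered last, so it runs first, and the exception temporary is released afterwards when the __cxa_end_catch cleanup runs.

struct E {
  E() {}
  E(const E &) {}
  ~E() {}  // runs for the caught copy 'e' first
};

void ordering() {
  try {
    throw E();
  } catch (E e) {
  }  // then the __cxa_end_catch cleanup destroys the exception temporary
}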
4742
4743/// Get or define the following function:
4744/// void @__clang_call_terminate(i8* %exn) nounwind noreturn
4745/// This code is used only in C++.
4746static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4747 ASTContext &C = CGM.getContext();
4748 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
4749 C.VoidTy, {C.getPointerType(C.CharTy)});
4750 llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI);
4751 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4752 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4753 llvm::Function *fn =
4754 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4755 if (fn->empty()) {
4756 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false);
4758 fn->setDoesNotThrow();
4759 fn->setDoesNotReturn();
4760
4761 // What we really want is to massively penalize inlining without
4762 // forbidding it completely. The difference between that and
4763 // 'noinline' is negligible.
4764 fn->addFnAttr(llvm::Attribute::NoInline);
4765
4766 // Allow this function to be shared across translation units, but
4767 // we don't want it to turn into an exported symbol.
4768 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4769 fn->setVisibility(llvm::Function::HiddenVisibility);
4770 if (CGM.supportsCOMDAT())
4771 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4772
4773 // Set up the function.
4774 llvm::BasicBlock *entry =
4775 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4776 CGBuilderTy builder(CGM, entry);
4777
4778 // Pull the exception pointer out of the parameter list.
4779 llvm::Value *exn = &*fn->arg_begin();
4780
4781 // Call __cxa_begin_catch(exn).
4782 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4783 catchCall->setDoesNotThrow();
4784 catchCall->setCallingConv(CGM.getRuntimeCC());
4785
4786 // Call std::terminate().
4787 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4788 termCall->setDoesNotThrow();
4789 termCall->setDoesNotReturn();
4790 termCall->setCallingConv(CGM.getRuntimeCC());
4791
4792 // std::terminate cannot return.
4793 builder.CreateUnreachable();
4794 }
4795 return fnRef;
4796}
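Written as ordinary C++ rather than hand-built IR, the synthesized function is roughly the following sketch (the function name here is illustrative; the real definition is the one emitted above):

#include <exception>

extern "C" void *__cxa_begin_catch(void *);

[[noreturn]] static void clangCallTerminateSketch(void *exn) noexcept {
  __cxa_begin_catch(exn);  // mark the offending exception as handled
  std::terminate();        // then terminate; never returns
}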
4797
4798llvm::CallInst *
4799ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4800 llvm::Value *Exn) {
4801 // In C++, we want to call __cxa_begin_catch() before terminating.
4802 if (Exn) {
4803 assert(CGF.CGM.getLangOpts().CPlusPlus);
4804 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4805 }
4806 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4807}
4808
4809std::pair<llvm::Value *, const CXXRecordDecl *>
4810ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4811 const CXXRecordDecl *RD) {
4812 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4813}
4814
4815void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4816 const CXXCatchStmt *C) {
4817 if (CGF.getTarget().hasFeature("exception-handling"))
4818 CGF.EHStack.pushCleanup<CatchRetScope>(
4819 NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4820 ItaniumCXXABI::emitBeginCatch(CGF, C);
4821}
4822
4823llvm::CallInst *
4824WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4825 llvm::Value *Exn) {
4826 // The Itanium ABI calls __clang_call_terminate(), which calls __cxa_begin_catch()
4827 // on the violating exception to mark it handled. Since that is currently hard to
4828 // do with the wasm EH instruction structure (catch/catch_all), we just call
4829 // std::terminate and ignore the violating exception, as in CGCXXABI.
4830 // TODO Consider a code transformation that makes calling __clang_call_terminate
4831 // possible.
4832 return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
4833 }
4834
4835/// Register a global destructor as best as we know how.
4836void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4837 llvm::FunctionCallee Dtor,
4838 llvm::Constant *Addr) {
4839 if (D.getTLSKind() != VarDecl::TLS_None) {
4840 llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
4841
4842 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4843 llvm::FunctionType *AtExitTy =
4844 llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, PtrTy}, true);
4845
4846 // Fetch the actual function.
4847 llvm::FunctionCallee AtExit =
4848 CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4849
4850 // Create __dtor function for the var decl.
4851 llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4852
4853 // Register above __dtor with atexit().
4854 // First param is flags and must be 0, second param is function ptr
4855 llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4856 CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4857
4858 // Cannot unregister TLS __dtor so done
4859 return;
4860 }
4861
4862 // Create __dtor function for the var decl.
4863 llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4864
4865 // Register above __dtor with atexit().
4866 CGF.registerGlobalDtorWithAtExit(DtorStub);
4867
4868 // Emit __finalize function to unregister __dtor and (as appropriate) call
4869 // __dtor.
4870 emitCXXStermFinalizer(D, DtorStub, Addr);
4871}
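At runtime, the TLS branch above amounts to registering a stub with AIX's __pt_atexit_np. A rough sketch, using the declaration given in the comment in the code; the stub name is illustrative:

extern "C" int __pt_atexit_np(int flags, int (*func)(int, ...), ...);

static int tlsDtorStub(int, ...) {
  // Destroy the thread-local variable here.
  return 0;
}

static void registerTLSDtorSketch() {
  __pt_atexit_np(/*flags=*/0, tlsDtorStub);  // first argument (flags) must be 0
}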
4872
4873void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4874 llvm::Constant *addr) {
4875 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4876 SmallString<256> FnName;
4877 {
4878 llvm::raw_svector_ostream Out(FnName);
4879 getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4880 }
4881
4882 // Create the finalization action associated with a variable.
4883 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
4884 llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4885 FTy, FnName.str(), FI, D.getLocation());
4886
4887 CodeGenFunction CGF(CGM);
4888
4889 CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4890 FunctionArgList(), D.getLocation(),
4891 D.getInit()->getExprLoc());
4892
4893 // The unatexit subroutine unregisters __dtor functions that were previously
4894 // registered by the atexit subroutine. If the referenced function is found,
4895 // the unatexit returns a value of 0, meaning that the cleanup is still
4896 // pending (and we should call the __dtor function).
4897 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
4898
4899 llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4900
4901 llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4902 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4903
4904 // Check if unatexit returns a value of 0. If it does, jump to
4905 // DestructCallBlock, otherwise jump to EndBlock directly.
4906 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4907
4908 CGF.EmitBlock(DestructCallBlock);
4909
4910 // Emit the call to dtorStub.
4911 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4912
4913 // Make sure the call and the callee agree on calling convention.
4914 CI->setCallingConv(dtorStub->getCallingConv());
4915
4916 CGF.EmitBlock(EndBlock);
4917
4918 CGF.FinishFunction();
4919
4920 if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
4921 CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
4922 IPA->getPriority());
4923 } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
4924 getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
4925 // According to C++ [basic.start.init]p2, class template static data
4926 // members (i.e., implicitly or explicitly instantiated specializations)
4927 // have unordered initialization. As a consequence, we can put them into
4928 // their own llvm.global_dtors entry.
4929 CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
4930 } else {
4931 CGM.AddCXXStermFinalizerEntry(StermFinalizer);
4932 }
4933}
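The sterm finalizer built above reduces to the following control flow, written as a C++ sketch (the unatexit declaration is an assumption based on the comment; dtorStub stands in for the generated __dtor stub):

extern "C" int unatexit(void (*func)(void));

static void dtorStub() {
  // Run the variable's destructor.
}

static void stermFinalizerSketch() {
  // unatexit returns 0 if dtorStub was still registered, i.e. the cleanup is
  // still pending and we must invoke it ourselves.
  if (unatexit(dtorStub) == 0)
    dtorStub();
}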
#define V(N, I)
Definition: ASTContext.h:3233
static StructorCodegen getCodegenToUse(CodeGenModule &CGM, const CXXMethodDecl *MD)
static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF)
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM)
Get or define the following function: void @__clang_call_terminate(i8* exn) nounwind noreturn This co...
static llvm::Value * performTypeAdjustment(CodeGenFunction &CGF, Address InitialPtr, int64_t NonVirtualAdjustment, int64_t VirtualAdjustment, bool IsReturnAdjustment)
static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type)
Compute the flags for a __pbase_type_info, and remove the corresponding pieces from Type.
static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty)
ShouldUseExternalRTTIDescriptor - Returns whether the type information for the given type exists some...
static bool IsIncompleteClassType(const RecordType *RecordTy)
IsIncompleteClassType - Returns whether the given record type is incomplete.
static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base, SeenBases &Bases)
ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in abi::__vmi_class_type_info.
static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF)
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF, llvm::FunctionCallee dtor, llvm::Constant *addr, bool TLS)
Register a global destructor using __cxa_atexit.
static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF)
static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM)
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty)
Return the linkage that the type info and type info name constants should have for the given type.
static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static llvm::Function * createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM, StringRef FnName)
static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM)
static bool IsStandardLibraryRTTIDescriptor(QualType Ty)
IsStandardLibraryRTTIDescriptor - Returns whether the type information for the given type exists in t...
static llvm::Value * CallBeginCatch(CodeGenFunction &CGF, llvm::Value *Exn, bool EndMightThrow)
Emits a call to __cxa_begin_catch and enters a cleanup to call __cxa_end_catch.
static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static CharUnits computeOffsetHint(ASTContext &Context, const CXXRecordDecl *Src, const CXXRecordDecl *Dst)
Compute the src2dst_offset hint as described in the Itanium C++ ABI [2.9.7].
static bool isThreadWrapperReplaceable(const VarDecl *VD, CodeGen::CodeGenModule &CGM)
static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD)
static void InitCatchParam(CodeGenFunction &CGF, const VarDecl &CatchParam, Address ParamAddr, SourceLocation Loc)
A "special initializer" callback for initializing a catch parameter during catch initialization.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty)
TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type info for that type is de...
static bool CanUseSingleInheritance(const CXXRecordDecl *RD)
static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM)
static llvm::GlobalValue::LinkageTypes getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM)
Get the appropriate linkage for the wrapper function.
static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM)
static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static bool ContainsIncompleteClassType(QualType Ty)
ContainsIncompleteClassType - Returns whether the given type contains an incomplete class type.
static void emitConstructorDestructorAlias(CodeGenModule &CGM, GlobalDecl AliasDecl, GlobalDecl TargetDecl)
static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM)
static void dtorTy(Block *, std::byte *Ptr, const Descriptor *)
Definition: Descriptor.cpp:28
int Priority
Definition: Format.cpp:2940
static uint64_t getFieldOffset(const ASTContext &C, const FieldDecl *FD)
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D)
Determine what kind of template specialization the given declaration is.
#define CXXABI(Name, Str)
Definition: TargetCXXABI.h:32
C Language Family Type Representation.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
const ValueDecl * getMemberPointerDecl() const
Definition: APValue.cpp:1049
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
CanQualType LongTy
Definition: ASTContext.h:1086
QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl, ObjCInterfaceDecl *PrevDecl=nullptr) const
getObjCInterfaceType - Return the unique reference to the type for the specified ObjC interface decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2527
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType VoidPtrTy
Definition: ASTContext.h:1104
IdentifierTable & Idents
Definition: ASTContext.h:630
const LangOptions & getLangOpts() const
Definition: ASTContext.h:761
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const
Get a function type and produce the equivalent function type with the specified exception specificati...
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType CharTy
Definition: ASTContext.h:1079
CanQualType IntTy
Definition: ASTContext.h:1086
CharUnits getExnObjectAlignment() const
Return the alignment (in bytes) of the thrown exception object.
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getPreferredTypeAlignInChars(QualType T) const
Return the PreferredAlignment of a (complete) type T, in characters.
Definition: ASTContext.h:2358
CanQualType VoidTy
Definition: ASTContext.h:1077
CanQualType UnsignedIntTy
Definition: ASTContext.h:1087
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:743
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
TargetCXXABI::Kind getCXXABIKind() const
Return the C++ ABI kind that should be used.
Definition: ASTContext.cpp:872
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
CanQualType LongLongTy
Definition: ASTContext.h:1086
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:249
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:259
This class is used for builtin types like 'int'.
Definition: Type.h:2682
Kind getKind() const
Definition: Type.h:2724
Implements C++ ABI-specific semantic analysis functions.
Definition: CXXABI.h:29
Represents a path from a specific derived class (which is not represented as part of the path) to a p...
BasePaths - Represents the set of paths from a derived class to one of its (direct or indirect) bases...
Represents a base class of a C++ class.
Definition: DeclCXX.h:146
QualType getType() const
Retrieves the type of the base class.
Definition: DeclCXX.h:245
CXXCatchStmt - This represents a C++ catch block.
Definition: StmtCXX.h:28
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2491
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition: ExprCXX.h:2486
FunctionDecl * getOperatorDelete() const
Definition: ExprCXX.h:2525
bool isGlobalDelete() const
Definition: ExprCXX.h:2511
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2755
Represents a call to a member function that may be written either with member call syntax (e....
Definition: ExprCXX.h:176
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2035
bool isVirtual() const
Definition: DeclCXX.h:2079
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition: DeclCXX.h:2150
bool isInstance() const
Definition: DeclCXX.h:2062
CXXMethodDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition: DeclCXX.h:2120
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition: ExprCXX.h:2212
Represents a C++ struct/union/class.
Definition: DeclCXX.h:254
base_class_range bases()
Definition: DeclCXX.h:606
unsigned getNumBases() const
Retrieves the number of base classes of this class.
Definition: DeclCXX.h:600
base_class_iterator bases_begin()
Definition: DeclCXX.h:613
base_class_range vbases()
Definition: DeclCXX.h:623
bool isAbstract() const
Determine whether this class has a pure virtual function.
Definition: DeclCXX.h:1203
bool isDynamicClass() const
Definition: DeclCXX.h:572
bool hasDefinition() const
Definition: DeclCXX.h:559
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
Definition: DeclCXX.h:621
bool isDerivedFrom(const CXXRecordDecl *Base) const
Determine whether this class is derived from the class Base.
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1192
const Expr * getSubExpr() const
Definition: ExprCXX.h:1212
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
Qualifiers getQualifiers() const
Retrieve all qualifiers.
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3502
CastKind getCastKind() const
Definition: Expr.h:3546
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
std::string SymbolPartition
The name of the partition that symbols are assigned to, specified with -fsymbol-partition (see https:...
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
An aligned address.
Definition: Address.h:29
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:78
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:62
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:100
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:68
llvm::Value * getPointer() const
Definition: Address.h:51
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:57
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:595
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:858
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:97
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:262
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:71
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:89
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
Definition: CGBuilder.h:216
Address CreateGEP(Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:249
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
virtual bool shouldEmitExactDynamicCast(QualType DestRecordTy)=0
virtual void EmitCXXConstructors(const CXXConstructorDecl *D)=0
Emit constructor variants required by this ABI.
virtual llvm::Constant * getAddrOfRTTIDescriptor(QualType Ty)=0
virtual llvm::Value * performReturnAdjustment(CodeGenFunction &CGF, Address Ret, const ReturnAdjustment &RA)=0
virtual llvm::Value * getVTableAddressPointInStructor(CodeGenFunction &CGF, const CXXRecordDecl *RD, BaseSubobject Base, const CXXRecordDecl *NearestVBase)=0
Get the address point of the vtable for the given base subobject while building a constructor or a de...
virtual void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C)=0
virtual void emitRethrow(CodeGenFunction &CGF, bool isNoReturn)=0
virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, FunctionArgList &Args) const =0
virtual bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr)=0
Checks if ABI requires extra virtual offset for vtable field.
virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit)=0
Emits the guarded initializer and destructor setup for the given variable, given that it couldn't be ...
virtual void EmitCXXDestructors(const CXXDestructorDecl *D)=0
Emit destructor variants required by this ABI.
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF)=0
Emit the ABI-specific prolog for the function.
virtual bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, CXXDtorType DT) const =0
Returns true if the given destructor type should be emitted as a linkonce delegating thunk,...
virtual bool NeedsVTTParameter(GlobalDecl GD)
Return whether the given global decl needs a VTT parameter.
Definition: CGCXXABI.cpp:318
virtual llvm::CallInst * emitTerminateForUnexpectedException(CodeGenFunction &CGF, llvm::Value *Exn)
Definition: CGCXXABI.cpp:323
@ RAA_Default
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Definition: CGCXXABI.h:157
@ RAA_Indirect
Pass it as a pointer to temporary memory.
Definition: CGCXXABI.h:165
virtual llvm::Type * ConvertMemberPointerType(const MemberPointerType *MPT)
Find the LLVM type used to represent the given member pointer type.
Definition: CGCXXABI.cpp:37
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition: CGCXXABI.cpp:95
virtual StringRef GetPureVirtualCallName()=0
Gets the pure virtual member call function.
virtual CharUnits getArrayCookieSizeImpl(QualType elementType)
Returns the extra size required in order to store the array cookie for the given type.
Definition: CGCXXABI.cpp:211
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const =0
Determine whether it's possible to emit a vtable for RD, even though we do not know that the vtable h...
virtual StringRef GetDeletedVirtualCallName()=0
Gets the deleted virtual member call name.
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:87
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
bool isEmittedWithConstantInitializer(const VarDecl *VD, bool InspectInitForWeakDef=false) const
Determine whether we will definitely emit this variable with a constant initializer,...
Definition: CGCXXABI.cpp:166
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition: CGCXXABI.cpp:77
virtual llvm::Constant * EmitMemberPointer(const APValue &MP, QualType MPT)
Create a member pointer for the given member pointer constant.
Definition: CGCXXABI.cpp:109
virtual llvm::Constant * getVTableAddressPoint(BaseSubobject Base, const CXXRecordDecl *VTableClass)=0
Get the address point of the vtable for the given base subobject.
virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params)=0
Insert any ABI-specific implicit parameters into the parameter list for a function.
virtual llvm::Value * readArrayCookieImpl(CodeGenFunction &IGF, Address ptr, CharUnits cookieSize)
Reads the array cookie for an allocation which is known to have one.
Definition: CGCXXABI.cpp:267
virtual llvm::Value * EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr, const MemberPointerType *MPT)
Calculate an l-value from an object and a data member pointer.
Definition: CGCXXABI.cpp:55
virtual llvm::Value * getCXXDestructorImplicitParam(CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating)=0
Get the implicit (second) parameter that comes after the "this" pointer, or nullptr if there is isn't...
virtual std::pair< llvm::Value *, const CXXRecordDecl * > LoadVTablePtr(CodeGenFunction &CGF, Address This, const CXXRecordDecl *RD)=0
Load a vtable from This, an object of polymorphic type RD, or from one of its virtual bases if it doe...
virtual llvm::Constant * getVTableAddressPointForConstExpr(BaseSubobject Base, const CXXRecordDecl *VTableClass)=0
Get the address point of the vtable for the given base subobject while building a constexpr.
virtual void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD, bool ReturnAdjustment)=0
virtual llvm::Value * EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType, Address This, DeleteOrMemberCallExpr E)=0
Emit the ABI-specific virtual destructor call.
bool mayNeedDestruction(const VarDecl *VD) const
Definition: CGCXXABI.cpp:153
virtual bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass)=0
Checks if ABI requires to initialize vptrs for given dynamic class.
virtual void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E)=0
virtual llvm::Value * GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This, const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl)=0
virtual bool isThisCompleteObject(GlobalDecl GD) const =0
Determine whether there's something special about the rules of the ABI tell us that 'this' is a compl...
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual bool classifyReturnType(CGFunctionInfo &FI) const =0
If the C++ ABI requires the given type be returned in a particular way, this method sets RetAI and re...
virtual void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE, Address Ptr, QualType ElementType, const CXXDestructorDecl *Dtor)=0
virtual CatchTypeInfo getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType)=0
virtual void EmitThreadLocalInitFuncs(CodeGenModule &CGM, ArrayRef< const VarDecl * > CXXThreadLocals, ArrayRef< llvm::Function * > CXXThreadLocalInits, ArrayRef< const VarDecl * > CXXThreadLocalInitVars)=0
Emits ABI-required functions necessary to initialize thread_local variables in this translation unit.
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual llvm::Value * emitExactDynamicCast(CodeGenFunction &CGF, Address Value, QualType SrcRecordTy, QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess, llvm::BasicBlock *CastFail)=0
Emit a dynamic_cast from SrcRecordTy to DestRecordTy.
virtual void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)=0
Emit the destructor call.
virtual llvm::GlobalVariable * getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset)=0
Get the address of the vtable for the given record decl which should be used for the vptr at the give...
virtual bool EmitBadCastCall(CodeGenFunction &CGF)=0
virtual llvm::Constant * EmitMemberDataPointer(const MemberPointerType *MPT, CharUnits offset)
Create a member pointer for the given field.
Definition: CGCXXABI.cpp:104
virtual llvm::Value * EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy, Address ThisPtr, llvm::Type *StdTypeInfoPtrTy)=0
virtual void emitVirtualInheritanceTables(const CXXRecordDecl *RD)=0
Emit any tables needed to implement virtual inheritance.
virtual void emitVTableDefinitions(CodeGenVTables &CGVT, const CXXRecordDecl *RD)=0
Emits the VTable definitions required for the given record type.
virtual CGCallee EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, const Expr *E, Address This, llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr, const MemberPointerType *MPT)
Load a member function from an object and a member function pointer.
Definition: CGCXXABI.cpp:41
virtual void emitCXXStructor(GlobalDecl GD)=0
Emit a single constructor/destructor with the given type from a C++ constructor Decl.
virtual bool exportThunk()=0
virtual void EmitBadTypeidCall(CodeGenFunction &CGF)=0
virtual llvm::Value * emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value, QualType SrcRecordTy)=0
virtual bool isZeroInitializable(const MemberPointerType *MPT)
Return true if the given member pointer can be zero-initialized (in the C++ sense) with an LLVM zeroi...
Definition: CGCXXABI.cpp:113
virtual bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy)=0
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition: CGCXXABI.cpp:64
virtual llvm::Constant * EmitMemberFunctionPointer(const CXXMethodDecl *MD)
Create a member pointer for the given method.
Definition: CGCXXABI.cpp:99
virtual llvm::Value * emitDynamicCastCall(CodeGenFunction &CGF, Address Value, QualType SrcRecordTy, QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd)=0
virtual Address InitializeArrayCookie(CodeGenFunction &CGF, Address NewPtr, llvm::Value *NumElements, const CXXNewExpr *expr, QualType ElementType)
Initialize the array cookie for the given allocation.
Definition: CGCXXABI.cpp:216
virtual bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, QualType SrcRecordTy)=0
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
virtual llvm::Value * performThisAdjustment(CodeGenFunction &CGF, Address This, const ThisAdjustment &TA)=0
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:117
virtual AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating)=0
All available information about a concrete callee.
Definition: CGCall.h:61
static CGCallee forVirtual(const CallExpr *CE, GlobalDecl MD, Address Addr, llvm::FunctionType *FTy)
Definition: CGCall.h:138
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:128