clang 19.0.0git
ItaniumCXXABI.cpp
Go to the documentation of this file.
1//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides C++ code generation targeting the Itanium C++ ABI. The class
10// in this file generates structures that follow the Itanium C++ ABI, which is
11// documented at:
12// https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14//
15// It also supports the closely-related ARM ABI, documented at:
16// https://developer.arm.com/documentation/ihi0041/g/
17//
18//===----------------------------------------------------------------------===//
19
20#include "CGCXXABI.h"
21#include "CGCleanup.h"
22#include "CGRecordLayout.h"
23#include "CGVTables.h"
24#include "CodeGenFunction.h"
25#include "CodeGenModule.h"
26#include "TargetInfo.h"
27#include "clang/AST/Attr.h"
28#include "clang/AST/Mangle.h"
29#include "clang/AST/StmtCXX.h"
30#include "clang/AST/Type.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/GlobalValue.h"
34#include "llvm/IR/Instructions.h"
35#include "llvm/IR/Intrinsics.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/ScopedPrinter.h"
38
39#include <optional>
40
41using namespace clang;
42using namespace CodeGen;
43
44namespace {
// Itanium C++ ABI implementation of the CGCXXABI code-generation interface.
// NOTE(review): this is a rendered-source extract; several declarations below
// lost their leading tokens during extraction. Code is kept verbatim - verify
// the flagged spots against upstream clang before relying on them.
45class ItaniumCXXABI : public CodeGen::CGCXXABI {
46 /// VTables - All the vtables which have been defined.
47 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
48
49 /// All the thread wrapper functions that have been used.
// NOTE(review): the declaration line giving ThreadWrappers' map type was
// dropped by extraction - confirm against upstream.
51 ThreadWrappers;
52
53protected:
54 bool UseARMMethodPtrABI;
55 bool UseARMGuardVarABI;
56 bool Use32BitVTableOffsetABI;
57
// NOTE(review): the signature line of this accessor (presumably a
// getMangleContext() override returning ItaniumMangleContext&) is missing -
// extraction artifact; verify upstream.
59 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
60 }
61
62public:
63 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
64 bool UseARMMethodPtrABI = false,
65 bool UseARMGuardVarABI = false) :
66 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
67 UseARMGuardVarABI(UseARMGuardVarABI),
68 Use32BitVTableOffsetABI(false) { }
69
70 bool classifyReturnType(CGFunctionInfo &FI) const override;
71
72 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
73 // If C++ prohibits us from making a copy, pass by address.
74 if (!RD->canPassInRegisters())
75 return RAA_Indirect;
76 return RAA_Default;
77 }
78
79 bool isThisCompleteObject(GlobalDecl GD) const override {
80 // The Itanium ABI has separate complete-object vs. base-object
81 // variants of both constructors and destructors.
82 if (isa<CXXDestructorDecl>(GD.getDecl())) {
83 switch (GD.getDtorType()) {
84 case Dtor_Complete:
85 case Dtor_Deleting:
86 return true;
87
88 case Dtor_Base:
89 return false;
90
91 case Dtor_Comdat:
92 llvm_unreachable("emitting dtor comdat as function?");
93 }
94 llvm_unreachable("bad dtor kind");
95 }
96 if (isa<CXXConstructorDecl>(GD.getDecl())) {
97 switch (GD.getCtorType()) {
98 case Ctor_Complete:
99 return true;
100
101 case Ctor_Base:
102 return false;
103
// NOTE(review): the case labels for the closure-ctor kinds were dropped by
// extraction just before this llvm_unreachable - verify upstream.
106 llvm_unreachable("closure ctors in Itanium ABI?");
107
108 case Ctor_Comdat:
109 llvm_unreachable("emitting ctor comdat as function?");
110 }
111 llvm_unreachable("bad dtor kind");
112 }
113
114 // No other kinds.
115 return false;
116 }
117
118 bool isZeroInitializable(const MemberPointerType *MPT) override;
119
120 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
121
124 const Expr *E,
125 Address This,
126 llvm::Value *&ThisPtrForCall,
127 llvm::Value *MemFnPtr,
128 const MemberPointerType *MPT) override;
129
130 llvm::Value *
133 llvm::Value *MemPtr,
134 const MemberPointerType *MPT) override;
135
137 const CastExpr *E,
138 llvm::Value *Src) override;
139 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
140 llvm::Constant *Src) override;
141
142 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
143
144 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
145 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
146 CharUnits offset) override;
147 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
148 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
150
152 llvm::Value *L, llvm::Value *R,
153 const MemberPointerType *MPT,
154 bool Inequality) override;
155
157 llvm::Value *Addr,
158 const MemberPointerType *MPT) override;
159
161 Address Ptr, QualType ElementType,
162 const CXXDestructorDecl *Dtor) override;
163
164 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
165 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
166
167 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
168
169 llvm::CallInst *
171 llvm::Value *Exn) override;
172
173 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
174 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
177 QualType CatchHandlerType) override {
179 }
180
181 bool shouldTypeidBeNullChecked(QualType SrcRecordTy) override;
182 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
183 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
184 Address ThisPtr,
185 llvm::Type *StdTypeInfoPtrTy) override;
186
187 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
188 QualType SrcRecordTy) override;
189
190 /// Determine whether we know that all instances of type RecordTy will have
191 /// the same vtable pointer values, that is distinct from all other vtable
192 /// pointers. While this is required by the Itanium ABI, it doesn't happen in
193 /// practice in some cases due to language extensions.
194 bool hasUniqueVTablePointer(QualType RecordTy) {
195 const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();
196
197 // Under -fapple-kext, multiple definitions of the same vtable may be
198 // emitted.
199 if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
200 getContext().getLangOpts().AppleKext)
201 return false;
202
203 // If the type_info* would be null, the vtable might be merged with that of
204 // another type.
205 if (!CGM.shouldEmitRTTI())
206 return false;
207
208 // If there's only one definition of the vtable in the program, it has a
209 // unique address.
210 if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD)))
211 return true;
212
213 // Even if there are multiple definitions of the vtable, they are required
214 // by the ABI to use the same symbol name, so should be merged at load
215 // time. However, if the class has hidden visibility, there can be
216 // different versions of the class in different modules, and the ABI
217 // library might treat them as being the same.
218 if (CGM.GetLLVMVisibility(RD->getVisibility()) !=
219 llvm::GlobalValue::DefaultVisibility)
220 return false;
221
222 return true;
223 }
224
225 bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
226 return hasUniqueVTablePointer(DestRecordTy);
227 }
228
230 QualType SrcRecordTy, QualType DestTy,
231 QualType DestRecordTy,
232 llvm::BasicBlock *CastEnd) override;
233
234 llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
235 QualType SrcRecordTy, QualType DestTy,
236 QualType DestRecordTy,
237 llvm::BasicBlock *CastSuccess,
238 llvm::BasicBlock *CastFail) override;
239
241 QualType SrcRecordTy) override;
242
243 bool EmitBadCastCall(CodeGenFunction &CGF) override;
244
245 llvm::Value *
247 const CXXRecordDecl *ClassDecl,
248 const CXXRecordDecl *BaseClassDecl) override;
249
250 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
251
252 AddedStructorArgCounts
254 SmallVectorImpl<CanQualType> &ArgTys) override;
255
257 CXXDtorType DT) const override {
258 // Itanium does not emit any destructor variant as an inline thunk.
259 // Delegating may occur as an optimization, but all variants are either
260 // emitted with external linkage or as linkonce if they are inline and used.
261 return false;
262 }
263
264 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
265
267 FunctionArgList &Params) override;
268
270
271 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
272 const CXXConstructorDecl *D,
274 bool ForVirtualBase,
275 bool Delegating) override;
276
278 const CXXDestructorDecl *DD,
280 bool ForVirtualBase,
281 bool Delegating) override;
282
284 CXXDtorType Type, bool ForVirtualBase,
285 bool Delegating, Address This,
286 QualType ThisTy) override;
287
289 const CXXRecordDecl *RD) override;
290
292 CodeGenFunction::VPtr Vptr) override;
293
294 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
295 return true;
296 }
297
298 llvm::Constant *
300 const CXXRecordDecl *VTableClass) override;
301
303 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
304 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
305
306 llvm::Value *getVTableAddressPointInStructorWithVTT(
307 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
308 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
309
310 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
311 CharUnits VPtrOffset) override;
312
314 Address This, llvm::Type *Ty,
315 SourceLocation Loc) override;
316
318 const CXXDestructorDecl *Dtor,
319 CXXDtorType DtorType, Address This,
320 DeleteOrMemberCallExpr E) override;
321
322 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
323
324 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
325 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
326
327 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
328 bool ReturnAdjustment) override {
329 // Allow inlining of thunks by emitting them with available_externally
330 // linkage together with vtables when needed.
331 if (ForVTable && !Thunk->hasLocalLinkage())
332 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
333 CGM.setGVProperties(Thunk, GD);
334 }
335
336 bool exportThunk() override { return true; }
337
338 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
339 const ThisAdjustment &TA) override;
340
341 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
342 const ReturnAdjustment &RA) override;
343
345 FunctionArgList &Args) const override {
346 assert(!Args.empty() && "expected the arglist to not be empty!");
347 return Args.size() - 1;
348 }
349
350 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
351 StringRef GetDeletedVirtualCallName() override
352 { return "__cxa_deleted_virtual"; }
353
354 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
356 Address NewPtr,
357 llvm::Value *NumElements,
358 const CXXNewExpr *expr,
359 QualType ElementType) override;
360 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
361 Address allocPtr,
362 CharUnits cookieSize) override;
363
364 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
365 llvm::GlobalVariable *DeclPtr,
366 bool PerformInit) override;
367 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
368 llvm::FunctionCallee dtor,
369 llvm::Constant *addr) override;
370
371 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
372 llvm::Value *Val);
374 CodeGenModule &CGM,
375 ArrayRef<const VarDecl *> CXXThreadLocals,
376 ArrayRef<llvm::Function *> CXXThreadLocalInits,
377 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
378
379 bool usesThreadWrapperFunction(const VarDecl *VD) const override {
382 }
384 QualType LValType) override;
385
386 bool NeedsVTTParameter(GlobalDecl GD) override;
387
388 /**************************** RTTI Uniqueness ******************************/
389
390protected:
391 /// Returns true if the ABI requires RTTI type_info objects to be unique
392 /// across a program.
393 virtual bool shouldRTTIBeUnique() const { return true; }
394
395public:
396 /// What sort of unique-RTTI behavior should we use?
397 enum RTTIUniquenessKind {
398 /// We are guaranteeing, or need to guarantee, that the RTTI string
399 /// is unique.
400 RUK_Unique,
401
402 /// We are not guaranteeing uniqueness for the RTTI string, so we
403 /// can demote to hidden visibility but must use string comparisons.
404 RUK_NonUniqueHidden,
405
406 /// We are not guaranteeing uniqueness for the RTTI string, so we
407 /// have to use string comparisons, but we also have to emit it with
408 /// non-hidden visibility.
409 RUK_NonUniqueVisible
410 };
411
412 /// Return the required visibility status for the given type and linkage in
413 /// the current ABI.
414 RTTIUniquenessKind
415 classifyRTTIUniqueness(QualType CanTy,
416 llvm::GlobalValue::LinkageTypes Linkage) const;
417 friend class ItaniumRTTIBuilder;
418
419 void emitCXXStructor(GlobalDecl GD) override;
420
421 std::pair<llvm::Value *, const CXXRecordDecl *>
423 const CXXRecordDecl *RD) override;
424
425 private:
426 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
427 const auto &VtableLayout =
428 CGM.getItaniumVTableContext().getVTableLayout(RD);
429
430 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
431 // Skip empty slot.
432 if (!VtableComponent.isUsedFunctionPointerKind())
433 continue;
434
435 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
436 if (!Method->getCanonicalDecl()->isInlined())
437 continue;
438
439 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
440 auto *Entry = CGM.GetGlobalValue(Name);
441 // This checks if virtual inline function has already been emitted.
442 // Note that it is possible that this inline function would be emitted
443 // after trying to emit vtable speculatively. Because of this we do
444 // an extra pass after emitting all deferred vtables to find and emit
445 // these vtables opportunistically.
446 if (!Entry || Entry->isDeclaration())
447 return true;
448 }
449 return false;
450 }
451
452 bool isVTableHidden(const CXXRecordDecl *RD) const {
453 const auto &VtableLayout =
454 CGM.getItaniumVTableContext().getVTableLayout(RD);
455
456 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
457 if (VtableComponent.isRTTIKind()) {
458 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
459 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
460 return true;
461 } else if (VtableComponent.isUsedFunctionPointerKind()) {
462 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
463 if (Method->getVisibility() == Visibility::HiddenVisibility &&
464 !Method->isDefined())
465 return true;
466 }
467 }
468 return false;
469 }
470};
471
// ARM variant of the Itanium ABI: enables the ARM member-pointer and guard
// ABIs in the base, makes ctors/dtors return 'this', and overrides the array
// cookie protocol (definitions live later in this file).
472class ARMCXXABI : public ItaniumCXXABI {
473public:
474 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
475 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
476 /*UseARMGuardVarABI=*/true) {}
477
478 bool constructorsAndDestructorsReturnThis() const override { return true; }
479
480 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
481 QualType ResTy) override;
482
483 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
484 Address InitializeArrayCookie(CodeGenFunction &CGF,
485 Address NewPtr,
486 llvm::Value *NumElements,
487 const CXXNewExpr *expr,
488 QualType ElementType) override;
489 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
490 CharUnits cookieSize) override;
491};
492
// Apple ARM64 flavor of the ARM ABI: additionally truncates virtual member
// function pointer vtable offsets to 32 bits and tolerates non-unique RTTI.
493class AppleARM64CXXABI : public ARMCXXABI {
494public:
495 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
496 Use32BitVTableOffsetABI = true;
497 }
498
499 // ARM64 libraries are prepared for non-unique RTTI.
500 bool shouldRTTIBeUnique() const override { return false; }
501};
502
// Fuchsia uses the generic Itanium ABI except that constructors and
// destructors return 'this'.
503class FuchsiaCXXABI final : public ItaniumCXXABI {
504public:
505 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
506 : ItaniumCXXABI(CGM) {}
507
508private:
509 bool constructorsAndDestructorsReturnThis() const override { return true; }
510};
511
// WebAssembly: ARM-style member pointers and guard variables, 'this'-returning
// structors, custom catch/terminate lowering, and no calls through mismatched
// function types (wasm traps on signature mismatch).
512class WebAssemblyCXXABI final : public ItaniumCXXABI {
513public:
514 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
515 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
516 /*UseARMGuardVarABI=*/true) {}
517 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
518 llvm::CallInst *
519 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
520 llvm::Value *Exn) override;
521
522private:
523 bool constructorsAndDestructorsReturnThis() const override { return true; }
524 bool canCallMismatchedFunctionType() const override { return false; }
525};
526
// AIX (XL) variant: uses sinit/sterm sections for global init/cleanup and a
// custom global-destructor registration path.
527class XLCXXABI final : public ItaniumCXXABI {
528public:
529 explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
530 : ItaniumCXXABI(CGM) {}
531
532 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
533 llvm::FunctionCallee dtor,
534 llvm::Constant *addr) override;
535
536 bool useSinitAndSterm() const override { return true; }
537
538private:
539 // Emits the sterm finalizer stub that runs 'dtorStub' for 'D' at unload.
540 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
541 llvm::Constant *addr);
542};
542}
543
// Factory: selects the concrete Itanium-family ABI implementation from the
// target's C++ ABI kind. NOTE(review): the function signature line
// (CodeGen::CreateItaniumCXXABI) was dropped by extraction - verify upstream.
545 switch (CGM.getContext().getCXXABIKind()) {
546 // For IR-generation purposes, there's no significant difference
547 // between the ARM and iOS ABIs.
548 case TargetCXXABI::GenericARM:
549 case TargetCXXABI::iOS:
550 case TargetCXXABI::WatchOS:
551 return new ARMCXXABI(CGM);
552
553 case TargetCXXABI::AppleARM64:
554 return new AppleARM64CXXABI(CGM);
555
556 case TargetCXXABI::Fuchsia:
557 return new FuchsiaCXXABI(CGM);
558
559 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
560 // include the other 32-bit ARM oddities: constructor/destructor return values
561 // and array cookies.
562 case TargetCXXABI::GenericAArch64:
563 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
564 /*UseARMGuardVarABI=*/true);
565
566 case TargetCXXABI::GenericMIPS:
567 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
568
569 case TargetCXXABI::WebAssembly:
570 return new WebAssemblyCXXABI(CGM);
571
572 case TargetCXXABI::XL:
573 return new XLCXXABI(CGM);
574
575 case TargetCXXABI::GenericItanium:
576 if (CGM.getContext().getTargetInfo().getTriple().getArch()
577 == llvm::Triple::le32) {
578 // For PNaCl, use ARM-style method pointers so that PNaCl code
579 // does not assume anything about the alignment of function
580 // pointers.
581 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
582 }
583 return new ItaniumCXXABI(CGM);
584
585 case TargetCXXABI::Microsoft:
586 llvm_unreachable("Microsoft ABI is not Itanium-based");
587 }
588 llvm_unreachable("bad ABI kind");
589}
590
591llvm::Type *
592ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
593 if (MPT->isMemberDataPointer())
594 return CGM.PtrDiffTy;
595 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
596}
597
598/// In the Itanium and ARM ABIs, method pointers have the form:
599/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
600///
601/// In the Itanium ABI:
602/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
603/// - the this-adjustment is (memptr.adj)
604/// - the virtual offset is (memptr.ptr - 1)
605///
606/// In the ARM ABI:
607/// - method pointers are virtual if (memptr.adj & 1) is nonzero
608/// - the this-adjustment is (memptr.adj >> 1)
609/// - the virtual offset is (memptr.ptr)
610/// ARM uses 'adj' for the virtual flag because Thumb functions
611/// may be only single-byte aligned.
612///
613/// If the member is virtual, the adjusted 'this' pointer points
614/// to a vtable pointer from which the virtual offset is applied.
615///
616/// If the member is non-virtual, memptr.ptr is the address of
617/// the function to call.
618CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
619 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
620 llvm::Value *&ThisPtrForCall,
621 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
622 CGBuilderTy &Builder = CGF.Builder;
623
624 const FunctionProtoType *FPT =
// NOTE(review): FPT's initializer line was dropped by extraction
// (presumably derives the prototype from MPT's pointee type) - verify
// against upstream before editing this function.
626 auto *RD =
627 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
628
629 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
630
631 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
632 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
633 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
634
635 // Extract memptr.adj, which is in the second field.
636 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
637
638 // Compute the true adjustment.
639 llvm::Value *Adj = RawAdj;
640 if (UseARMMethodPtrABI)
641 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
642
643 // Apply the adjustment and cast back to the original struct type
644 // for consistency.
645 llvm::Value *This = ThisAddr.emitRawPointer(CGF);
646 This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj);
647 ThisPtrForCall = This;
648
649 // Load the function pointer.
650 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
651
652 // If the LSB in the function pointer is 1, the function pointer points to
653 // a virtual function.
654 llvm::Value *IsVirtual;
655 if (UseARMMethodPtrABI)
656 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
657 else
658 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
659 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
660 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
661
662 // In the virtual path, the adjustment left 'This' pointing to the
663 // vtable of the correct base subobject. The "function pointer" is an
664 // offset within the vtable (+1 for the virtual flag on non-ARM).
665 CGF.EmitBlock(FnVirtual);
666
667 // Cast the adjusted this to a pointer to vtable pointer and load.
668 llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
669 CharUnits VTablePtrAlign =
670 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
671 CGF.getPointerAlign());
672 llvm::Value *VTable = CGF.GetVTablePtr(
673 Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
674
675 // Apply the offset.
676 // On ARM64, to reserve extra space in virtual member function pointers,
677 // we only pay attention to the low 32 bits of the offset.
678 llvm::Value *VTableOffset = FnAsInt;
679 if (!UseARMMethodPtrABI)
680 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
681 if (Use32BitVTableOffsetABI) {
682 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
683 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
684 }
685
686 // Check the address of the function pointer if CFI on member function
687 // pointers is enabled.
688 llvm::Constant *CheckSourceLocation;
689 llvm::Constant *CheckTypeDesc;
690 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
691 CGM.HasHiddenLTOVisibility(RD);
692 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
693 CGM.HasHiddenLTOVisibility(RD);
694 bool ShouldEmitWPDInfo =
695 CGM.getCodeGenOpts().WholeProgramVTables &&
696 // Don't insert type tests if we are forcing public visibility.
697 !CGM.AlwaysHasLTOVisibilityPublic(RD);
698 llvm::Value *VirtualFn = nullptr;
699
700 {
701 CodeGenFunction::SanitizerScope SanScope(&CGF);
702 llvm::Value *TypeId = nullptr;
703 llvm::Value *CheckResult = nullptr;
704
705 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
706 // If doing CFI, VFE or WPD, we will need the metadata node to check
707 // against.
708 llvm::Metadata *MD =
709 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
710 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
711 }
712
713 if (ShouldEmitVFEInfo) {
714 llvm::Value *VFPAddr =
715 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
716
717 // If doing VFE, load from the vtable with a type.checked.load intrinsic
718 // call. Note that we use the GEP to calculate the address to load from
719 // and pass 0 as the offset to the intrinsic. This is because every
720 // vtable slot of the correct type is marked with matching metadata, and
721 // we know that the load must be from one of these slots.
722 llvm::Value *CheckedLoad = Builder.CreateCall(
723 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
724 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
725 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
726 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
727 } else {
728 // When not doing VFE, emit a normal load, as it allows more
729 // optimisations than type.checked.load.
730 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
731 llvm::Value *VFPAddr =
732 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
733 llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
734 ? llvm::Intrinsic::type_test
735 : llvm::Intrinsic::public_type_test;
736
737 CheckResult =
738 Builder.CreateCall(CGM.getIntrinsic(IID), {VFPAddr, TypeId});
739 }
740
741 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
742 VirtualFn = CGF.Builder.CreateCall(
743 CGM.getIntrinsic(llvm::Intrinsic::load_relative,
744 {VTableOffset->getType()}),
745 {VTable, VTableOffset});
746 } else {
747 llvm::Value *VFPAddr =
748 CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
749 VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
750 CGF.getPointerAlign(),
751 "memptr.virtualfn");
752 }
753 }
// NOTE(review): "fuction" typo in the assert message below; left as-is
// because a doc-only pass must not alter string literals.
754 assert(VirtualFn && "Virtual fuction pointer not created!");
755 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
756 CheckResult) &&
757 "Check result required but not created!");
758
759 if (ShouldEmitCFICheck) {
760 // If doing CFI, emit the check.
761 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
762 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
763 llvm::Constant *StaticData[] = {
764 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
765 CheckSourceLocation,
766 CheckTypeDesc,
767 };
768
769 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
770 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
771 } else {
772 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
773 CGM.getLLVMContext(),
774 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
775 llvm::Value *ValidVtable = Builder.CreateCall(
776 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
777 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
778 SanitizerHandler::CFICheckFail, StaticData,
779 {VTable, ValidVtable});
780 }
781
782 FnVirtual = Builder.GetInsertBlock();
783 }
784 } // End of sanitizer scope
785
786 CGF.EmitBranch(FnEnd);
787
788 // In the non-virtual path, the function pointer is actually a
789 // function pointer.
790 CGF.EmitBlock(FnNonVirtual);
791 llvm::Value *NonVirtualFn =
792 Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
793
794 // Check the function pointer if CFI on member function pointers is enabled.
795 if (ShouldEmitCFICheck) {
797 if (RD->hasDefinition()) {
798 CodeGenFunction::SanitizerScope SanScope(&CGF);
799
800 llvm::Constant *StaticData[] = {
801 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
802 CheckSourceLocation,
803 CheckTypeDesc,
804 };
805
806 llvm::Value *Bit = Builder.getFalse();
807 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
808 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
809 getContext().getMemberPointerType(
810 MPT->getPointeeType(),
811 getContext().getRecordType(Base).getTypePtr()));
812 llvm::Value *TypeId =
813 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
814
815 llvm::Value *TypeTest =
816 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
817 {NonVirtualFn, TypeId});
818 Bit = Builder.CreateOr(Bit, TypeTest);
819 }
820
821 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
822 SanitizerHandler::CFICheckFail, StaticData,
823 {NonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
824
825 FnNonVirtual = Builder.GetInsertBlock();
826 }
827 }
828
829 // We're done.
830 CGF.EmitBlock(FnEnd);
831 llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
832 CalleePtr->addIncoming(VirtualFn, FnVirtual);
833 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
834
835 CGCallee Callee(FPT, CalleePtr);
836 return Callee;
837}
838
839/// Compute an l-value by applying the given pointer-to-member to a
840/// base object.
841llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
842 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
843 const MemberPointerType *MPT) {
844 assert(MemPtr->getType() == CGM.PtrDiffTy);
845
846 CGBuilderTy &Builder = CGF.Builder;
847
848 // Apply the offset, which we assume is non-null.
849 return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.emitRawPointer(CGF), MemPtr,
850 "memptr.offset");
851}
852
853/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
854/// conversion.
855///
856/// Bitcast conversions are always a no-op under Itanium.
857///
858/// Obligatory offset/adjustment diagram:
859/// <-- offset --> <-- adjustment -->
860/// |--------------------------|----------------------|--------------------|
861/// ^Derived address point ^Base address point ^Member address point
862///
863/// So when converting a base member pointer to a derived member pointer,
864/// we add the offset to the adjustment because the address point has
865/// decreased; and conversely, when converting a derived MP to a base MP
866/// we subtract the offset from the adjustment because the address point
867/// has increased.
868///
869/// The standard forbids (at compile time) conversion to and from
870/// virtual bases, which is why we don't have to consider them here.
871///
872/// The standard forbids (at run time) casting a derived MP to a base
873/// MP when the derived MP does not point to a member of the base.
874/// This is why -1 is a reasonable choice for null data member
875/// pointers.
876llvm::Value *
877ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
878 const CastExpr *E,
879 llvm::Value *src) {
880 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
881 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
882 E->getCastKind() == CK_ReinterpretMemberPointer);
883
884 // Under Itanium, reinterprets don't require any additional processing.
885 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
886
887 // Use constant emission if we can.
888 if (isa<llvm::Constant>(src))
889 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
890
891 llvm::Constant *adj = getMemberPointerAdjustment(E);
892 if (!adj) return src;
893
894 CGBuilderTy &Builder = CGF.Builder;
895 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
896
897 const MemberPointerType *destTy =
// NOTE(review): destTy's initializer line was dropped by extraction
// (presumably derived from E's result type) - verify against upstream.
899
900 // For member data pointers, this is just a matter of adding the
901 // offset if the source is non-null.
902 if (destTy->isMemberDataPointer()) {
903 llvm::Value *dst;
904 if (isDerivedToBase)
905 dst = Builder.CreateNSWSub(src, adj, "adj");
906 else
907 dst = Builder.CreateNSWAdd(src, adj, "adj");
908
909 // Null check.
910 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
911 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
912 return Builder.CreateSelect(isNull, src, dst);
913 }
914
915 // The this-adjustment is left-shifted by 1 on ARM.
916 if (UseARMMethodPtrABI) {
917 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
918 offset <<= 1;
919 adj = llvm::ConstantInt::get(adj->getType(), offset);
920 }
921
922 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
923 llvm::Value *dstAdj;
924 if (isDerivedToBase)
925 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
926 else
927 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
928
929 return Builder.CreateInsertValue(src, dstAdj, 1);
930}
931
932llvm::Constant *
933ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
934 llvm::Constant *src) {
935 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
936 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
937 E->getCastKind() == CK_ReinterpretMemberPointer);
938
939 // Under Itanium, reinterprets don't require any additional processing.
940 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
941
942 // If the adjustment is trivial, we don't need to do anything.
943 llvm::Constant *adj = getMemberPointerAdjustment(E);
944 if (!adj) return src;
945
946 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
947
948 const MemberPointerType *destTy =
950
951 // For member data pointers, this is just a matter of adding the
952 // offset if the source is non-null.
953 if (destTy->isMemberDataPointer()) {
954 // null maps to null.
955 if (src->isAllOnesValue()) return src;
956
957 if (isDerivedToBase)
958 return llvm::ConstantExpr::getNSWSub(src, adj);
959 else
960 return llvm::ConstantExpr::getNSWAdd(src, adj);
961 }
962
963 // The this-adjustment is left-shifted by 1 on ARM.
964 if (UseARMMethodPtrABI) {
965 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
966 offset <<= 1;
967 adj = llvm::ConstantInt::get(adj->getType(), offset);
968 }
969
970 llvm::Constant *srcAdj = src->getAggregateElement(1);
971 llvm::Constant *dstAdj;
972 if (isDerivedToBase)
973 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
974 else
975 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
976
977 llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
978 assert(res != nullptr && "Folding must succeed");
979 return res;
980}
981
982llvm::Constant *
983ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
984 // Itanium C++ ABI 2.3:
985 // A NULL pointer is represented as -1.
986 if (MPT->isMemberDataPointer())
987 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
988
989 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
990 llvm::Constant *Values[2] = { Zero, Zero };
991 return llvm::ConstantStruct::getAnon(Values);
992}
993
994llvm::Constant *
995ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
996 CharUnits offset) {
997 // Itanium C++ ABI 2.3:
998 // A pointer to data member is an offset from the base address of
999 // the class object containing it, represented as a ptrdiff_t
1000 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
1001}
1002
1003llvm::Constant *
1004ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
1005 return BuildMemberPointer(MD, CharUnits::Zero());
1006}
1007
1008llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
1010 assert(MD->isInstance() && "Member function must not be static!");
1011
1012 CodeGenTypes &Types = CGM.getTypes();
1013
1014 // Get the function pointer (or index if this is a virtual function).
1015 llvm::Constant *MemPtr[2];
1016 if (MD->isVirtual()) {
1017 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1018 uint64_t VTableOffset;
1019 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1020 // Multiply by 4-byte relative offsets.
1021 VTableOffset = Index * 4;
1022 } else {
1023 const ASTContext &Context = getContext();
1024 CharUnits PointerWidth = Context.toCharUnitsFromBits(
1025 Context.getTargetInfo().getPointerWidth(LangAS::Default));
1026 VTableOffset = Index * PointerWidth.getQuantity();
1027 }
1028
1029 if (UseARMMethodPtrABI) {
1030 // ARM C++ ABI 3.2.1:
1031 // This ABI specifies that adj contains twice the this
1032 // adjustment, plus 1 if the member function is virtual. The
1033 // least significant bit of adj then makes exactly the same
1034 // discrimination as the least significant bit of ptr does for
1035 // Itanium.
1036 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1037 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1038 2 * ThisAdjustment.getQuantity() + 1);
1039 } else {
1040 // Itanium C++ ABI 2.3:
1041 // For a virtual function, [the pointer field] is 1 plus the
1042 // virtual table offset (in bytes) of the function,
1043 // represented as a ptrdiff_t.
1044 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1045 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1046 ThisAdjustment.getQuantity());
1047 }
1048 } else {
1049 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1050 llvm::Type *Ty;
1051 // Check whether the function has a computable LLVM signature.
1052 if (Types.isFuncTypeConvertible(FPT)) {
1053 // The function has a computable LLVM signature; use the correct type.
1054 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1055 } else {
1056 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1057 // function type is incomplete.
1058 Ty = CGM.PtrDiffTy;
1059 }
1060 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
1061
1062 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1063 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1064 (UseARMMethodPtrABI ? 2 : 1) *
1065 ThisAdjustment.getQuantity());
1066 }
1067
1068 return llvm::ConstantStruct::getAnon(MemPtr);
1069}
1070
1071llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1072 QualType MPType) {
1073 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1074 const ValueDecl *MPD = MP.getMemberPointerDecl();
1075 if (!MPD)
1076 return EmitNullMemberPointer(MPT);
1077
1078 CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1079
1080 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1081 return BuildMemberPointer(MD, ThisAdjustment);
1082
1083 CharUnits FieldOffset =
1084 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1085 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1086}
1087
1088/// The comparison algorithm is pretty easy: the member pointers are
1089/// the same if they're either bitwise identical *or* both null.
1090///
1091/// ARM is different here only because null-ness is more complicated.
1092llvm::Value *
1093ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
1094 llvm::Value *L,
1095 llvm::Value *R,
1096 const MemberPointerType *MPT,
1097 bool Inequality) {
1098 CGBuilderTy &Builder = CGF.Builder;
1099
1100 llvm::ICmpInst::Predicate Eq;
1101 llvm::Instruction::BinaryOps And, Or;
1102 if (Inequality) {
1103 Eq = llvm::ICmpInst::ICMP_NE;
1104 And = llvm::Instruction::Or;
1105 Or = llvm::Instruction::And;
1106 } else {
1107 Eq = llvm::ICmpInst::ICMP_EQ;
1108 And = llvm::Instruction::And;
1109 Or = llvm::Instruction::Or;
1110 }
1111
1112 // Member data pointers are easy because there's a unique null
1113 // value, so it just comes down to bitwise equality.
1114 if (MPT->isMemberDataPointer())
1115 return Builder.CreateICmp(Eq, L, R);
1116
1117 // For member function pointers, the tautologies are more complex.
1118 // The Itanium tautology is:
1119 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1120 // The ARM tautology is:
1121 // (L == R) <==> (L.ptr == R.ptr &&
1122 // (L.adj == R.adj ||
1123 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1124 // The inequality tautologies have exactly the same structure, except
1125 // applying De Morgan's laws.
1126
1127 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1128 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1129
1130 // This condition tests whether L.ptr == R.ptr. This must always be
1131 // true for equality to hold.
1132 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1133
1134 // This condition, together with the assumption that L.ptr == R.ptr,
1135 // tests whether the pointers are both null. ARM imposes an extra
1136 // condition.
1137 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1138 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1139
1140 // This condition tests whether L.adj == R.adj. If this isn't
1141 // true, the pointers are unequal unless they're both null.
1142 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1143 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1144 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1145
1146 // Null member function pointers on ARM clear the low bit of Adj,
1147 // so the zero condition has to check that neither low bit is set.
1148 if (UseARMMethodPtrABI) {
1149 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1150
1151 // Compute (l.adj | r.adj) & 1 and test it against zero.
1152 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1153 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1154 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1155 "cmp.or.adj");
1156 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1157 }
1158
1159 // Tie together all our conditions.
1160 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1161 Result = Builder.CreateBinOp(And, PtrEq, Result,
1162 Inequality ? "memptr.ne" : "memptr.eq");
1163 return Result;
1164}
1165
1166llvm::Value *
1167ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1168 llvm::Value *MemPtr,
1169 const MemberPointerType *MPT) {
1170 CGBuilderTy &Builder = CGF.Builder;
1171
1172 /// For member data pointers, this is just a check against -1.
1173 if (MPT->isMemberDataPointer()) {
1174 assert(MemPtr->getType() == CGM.PtrDiffTy);
1175 llvm::Value *NegativeOne =
1176 llvm::Constant::getAllOnesValue(MemPtr->getType());
1177 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1178 }
1179
1180 // In Itanium, a member function pointer is not null if 'ptr' is not null.
1181 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1182
1183 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1184 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1185
1186 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1187 // (the virtual bit) is set.
1188 if (UseARMMethodPtrABI) {
1189 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1190 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1191 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1192 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1193 "memptr.isvirtual");
1194 Result = Builder.CreateOr(Result, IsVirtual);
1195 }
1196
1197 return Result;
1198}
1199
1200bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1201 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1202 if (!RD)
1203 return false;
1204
1205 // If C++ prohibits us from making a copy, return by address.
1206 if (!RD->canPassInRegisters()) {
1207 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1208 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1209 return true;
1210 }
1211 return false;
1212}
1213
1214/// The Itanium ABI requires non-zero initialization only for data
1215/// member pointers, for which '0' is a valid offset.
1216bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1217 return MPT->isMemberFunctionPointer();
1218}
1219
/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
///
/// Emits a virtual destructor call for a delete-expression.  When the
/// global operator delete is used, the complete-object pointer is derived
/// via the vtable's offset-to-top and the deallocation is registered as a
/// cleanup so it runs even if the destructor throws.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,
                                                        CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr = Ptr.emitRawPointer(CGF);
    CompletePtr =
        CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // With a global delete, only the complete destructor runs here (the
  // cleanup handles deallocation); otherwise the vtable's deleting
  // destructor both destroys and deallocates.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}
1262
1263void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1264 // void __cxa_rethrow();
1265
1266 llvm::FunctionType *FTy =
1267 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1268
1269 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1270
1271 if (isNoReturn)
1272 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, std::nullopt);
1273 else
1275}
1276
1277static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1278 // void *__cxa_allocate_exception(size_t thrown_size);
1279
1280 llvm::FunctionType *FTy =
1281 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1282
1283 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1284}
1285
1286static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1287 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1288 // void (*dest) (void *));
1289
1290 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
1291 llvm::FunctionType *FTy =
1292 llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1293
1294 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1295}
1296
1297void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1298 QualType ThrowType = E->getSubExpr()->getType();
1299 // Now allocate the exception object.
1300 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1301 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1302
1303 llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1304 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1305 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1306
1307 CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1308 CGF.EmitAnyExprToExn(
1309 E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
1310
1311 // Now throw the exception.
1312 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1313 /*ForEH=*/true);
1314
1315 // The address of the destructor. If the exception type has a
1316 // trivial destructor (or isn't a record), we just pass null.
1317 llvm::Constant *Dtor = nullptr;
1318 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1319 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1320 if (!Record->hasTrivialDestructor()) {
1321 CXXDestructorDecl *DtorD = Record->getDestructor();
1322 Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1323 }
1324 }
1325 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1326
1327 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1329}
1330
1331static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1332 // void *__dynamic_cast(const void *sub,
1333 // global_as const abi::__class_type_info *src,
1334 // global_as const abi::__class_type_info *dst,
1335 // std::ptrdiff_t src2dst_offset);
1336
1337 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1338 llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy;
1339 llvm::Type *PtrDiffTy =
1341
1342 llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy };
1343
1344 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1345
1346 // Mark the function as nounwind willreturn readonly.
1347 llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
1348 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1349 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
1350 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
1351 llvm::AttributeList Attrs = llvm::AttributeList::get(
1352 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1353
1354 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1355}
1356
1357static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1358 // void __cxa_bad_cast();
1359 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1360 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1361}
1362
1363/// Compute the src2dst_offset hint as described in the
1364/// Itanium C++ ABI [2.9.7]
1366 const CXXRecordDecl *Src,
1367 const CXXRecordDecl *Dst) {
1368 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1369 /*DetectVirtual=*/false);
1370
1371 // If Dst is not derived from Src we can skip the whole computation below and
1372 // return that Src is not a public base of Dst. Record all inheritance paths.
1373 if (!Dst->isDerivedFrom(Src, Paths))
1374 return CharUnits::fromQuantity(-2ULL);
1375
1376 unsigned NumPublicPaths = 0;
1377 CharUnits Offset;
1378
1379 // Now walk all possible inheritance paths.
1380 for (const CXXBasePath &Path : Paths) {
1381 if (Path.Access != AS_public) // Ignore non-public inheritance.
1382 continue;
1383
1384 ++NumPublicPaths;
1385
1386 for (const CXXBasePathElement &PathElement : Path) {
1387 // If the path contains a virtual base class we can't give any hint.
1388 // -1: no hint.
1389 if (PathElement.Base->isVirtual())
1390 return CharUnits::fromQuantity(-1ULL);
1391
1392 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1393 continue;
1394
1395 // Accumulate the base class offsets.
1396 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1397 Offset += L.getBaseClassOffset(
1398 PathElement.Base->getType()->getAsCXXRecordDecl());
1399 }
1400 }
1401
1402 // -2: Src is not a public base of Dst.
1403 if (NumPublicPaths == 0)
1404 return CharUnits::fromQuantity(-2ULL);
1405
1406 // -3: Src is a multiple public base type but never a virtual base type.
1407 if (NumPublicPaths > 1)
1408 return CharUnits::fromQuantity(-3ULL);
1409
1410 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1411 // Return the offset of Src from the origin of Dst.
1412 return Offset;
1413}
1414
1415static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1416 // void __cxa_bad_typeid();
1417 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1418
1419 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1420}
1421
1422bool ItaniumCXXABI::shouldTypeidBeNullChecked(QualType SrcRecordTy) {
1423 return true;
1424}
1425
1426void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1427 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1428 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1429 Call->setDoesNotReturn();
1430 CGF.Builder.CreateUnreachable();
1431}
1432
1433llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1434 QualType SrcRecordTy,
1435 Address ThisPtr,
1436 llvm::Type *StdTypeInfoPtrTy) {
1437 auto *ClassDecl =
1438 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1439 llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy,
1440 ClassDecl);
1441
1442 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1443 // Load the type info.
1444 Value = CGF.Builder.CreateCall(
1445 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1446 {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
1447 } else {
1448 // Load the type info.
1449 Value =
1450 CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
1451 }
1452 return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
1453 CGF.getPointerAlign());
1454}
1455
1456bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1457 QualType SrcRecordTy) {
1458 return SrcIsPtr;
1459}
1460
1461llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
1462 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1463 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1464 llvm::Type *PtrDiffLTy =
1466
1467 llvm::Value *SrcRTTI =
1469 llvm::Value *DestRTTI =
1471
1472 // Compute the offset hint.
1473 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1474 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1475 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1476 PtrDiffLTy,
1477 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1478
1479 // Emit the call to __dynamic_cast.
1480 llvm::Value *Args[] = {ThisAddr.emitRawPointer(CGF), SrcRTTI, DestRTTI,
1481 OffsetHint};
1482 llvm::Value *Value =
1484
1485 /// C++ [expr.dynamic.cast]p9:
1486 /// A failed cast to reference type throws std::bad_cast
1487 if (DestTy->isReferenceType()) {
1488 llvm::BasicBlock *BadCastBlock =
1489 CGF.createBasicBlock("dynamic_cast.bad_cast");
1490
1491 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1492 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1493
1494 CGF.EmitBlock(BadCastBlock);
1495 EmitBadCastCall(CGF);
1496 }
1497
1498 return Value;
1499}
1500
/// Emit a dynamic_cast that is resolved by a single vptr comparison:
/// success means the most-derived object is exactly a DestDecl with the
/// SrcDecl subobject at a statically known offset.  Branches to
/// CastSuccess/CastFail and returns the adjusted pointer (valid only on
/// the success edge).
llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
    llvm::BasicBlock *CastFail) {
  ASTContext &Context = getContext();

  // Find all the inheritance paths.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);
  (void)DestDecl->isDerivedFrom(SrcDecl, Paths);

  // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
  // might appear.
  std::optional<CharUnits> Offset;
  for (const CXXBasePath &Path : Paths) {
    // dynamic_cast only finds public inheritance paths.
    if (Path.Access != AS_public)
      continue;

    // Accumulate the offset of SrcDecl within DestDecl along this path.
    CharUnits PathOffset;
    for (const CXXBasePathElement &PathElement : Path) {
      // Find the offset along this inheritance step.
      const CXXRecordDecl *Base =
          PathElement.Base->getType()->getAsCXXRecordDecl();
      if (PathElement.Base->isVirtual()) {
        // For a virtual base class, we know that the derived class is exactly
        // DestDecl, so we can use the vbase offset from its layout.
        const ASTRecordLayout &L = Context.getASTRecordLayout(DestDecl);
        PathOffset = L.getVBaseClassOffset(Base);
      } else {
        const ASTRecordLayout &L =
            Context.getASTRecordLayout(PathElement.Class);
        PathOffset += L.getBaseClassOffset(Base);
      }
    }

    if (!Offset)
      Offset = PathOffset;
    else if (Offset != PathOffset) {
      // Base appears in at least two different places. Find the most-derived
      // object and see if it's a DestDecl. Note that the most-derived object
      // must be at least as aligned as this base class subobject, and must
      // have a vptr at offset 0.
      ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy),
                         CGF.VoidPtrTy, ThisAddr.getAlignment());
      SrcDecl = DestDecl;
      Offset = CharUnits::Zero();
      break;
    }
  }

  if (!Offset) {
    // If there are no public inheritance paths, the cast always fails.
    CGF.EmitBranch(CastFail);
    return llvm::PoisonValue::get(CGF.VoidPtrTy);
  }

  // Compare the vptr against the expected vptr for the destination type at
  // this offset. Note that we do not know what type ThisAddr points to in
  // the case where the derived class multiply inherits from the base class
  // so we can't use GetVTablePtr, so we load the vptr directly instead.
  llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
      ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable");
  CGM.DecorateInstructionWithTBAA(
      VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
  llvm::Value *Success = CGF.Builder.CreateICmpEQ(
      VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
  // On success, adjust the pointer back from the SrcDecl subobject to the
  // start of the most-derived (DestDecl) object.
  llvm::Value *Result = ThisAddr.emitRawPointer(CGF);
  if (!Offset->isZero())
    Result = CGF.Builder.CreateInBoundsGEP(
        CGF.CharTy, Result,
        {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())});
  CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail);
  return Result;
}
1578
1579llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
1580 Address ThisAddr,
1581 QualType SrcRecordTy) {
1582 auto *ClassDecl =
1583 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1584 llvm::Value *OffsetToTop;
1585 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1586 // Get the vtable pointer.
1587 llvm::Value *VTable =
1588 CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
1589
1590 // Get the offset-to-top from the vtable.
1591 OffsetToTop =
1592 CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
1593 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1594 CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1595 } else {
1596 llvm::Type *PtrDiffLTy =
1598
1599 // Get the vtable pointer.
1600 llvm::Value *VTable =
1601 CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
1602
1603 // Get the offset-to-top from the vtable.
1604 OffsetToTop =
1605 CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
1606 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1607 PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1608 }
1609 // Finally, add the offset to the pointer.
1610 return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.emitRawPointer(CGF),
1611 OffsetToTop);
1612}
1613
1614bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1615 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1616 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1617 Call->setDoesNotReturn();
1618 CGF.Builder.CreateUnreachable();
1619 return true;
1620}
1621
1622llvm::Value *
1623ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1624 Address This,
1625 const CXXRecordDecl *ClassDecl,
1626 const CXXRecordDecl *BaseClassDecl) {
1627 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1628 CharUnits VBaseOffsetOffset =
1629 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1630 BaseClassDecl);
1631 llvm::Value *VBaseOffsetPtr =
1632 CGF.Builder.CreateConstGEP1_64(
1633 CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
1634 "vbase.offset.ptr");
1635
1636 llvm::Value *VBaseOffset;
1637 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1638 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1639 CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
1640 "vbase.offset");
1641 } else {
1642 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1643 CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
1644 }
1645 return VBaseOffset;
1646}
1647
1648void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1649 // Just make sure we're in sync with TargetCXXABI.
1650 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1651
1652 // The constructor used for constructing this as a base class;
1653 // ignores virtual bases.
1654 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1655
1656 // The constructor used for constructing this as a complete class;
1657 // constructs the virtual bases, then calls the base constructor.
1658 if (!D->getParent()->isAbstract()) {
1659 // We don't need to emit the complete ctor if the class is abstract.
1660 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1661 }
1662}
1663
1665ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1667 ASTContext &Context = getContext();
1668
1669 // All parameters are already in place except VTT, which goes after 'this'.
1670 // These are Clang types, so we don't need to worry about sret yet.
1671
1672 // Check if we need to add a VTT parameter (which has type global void **).
1673 if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1674 : GD.getDtorType() == Dtor_Base) &&
1675 cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1676 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1677 QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
1678 ArgTys.insert(ArgTys.begin() + 1,
1680 return AddedStructorArgCounts::prefix(1);
1681 }
1682 return AddedStructorArgCounts{};
1683}
1684
1685void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1686 // The destructor used for destructing this as a base class; ignores
1687 // virtual bases.
1688 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1689
1690 // The destructor used for destructing this as a most-derived class;
1691 // call the base destructor and then destructs any virtual bases.
1692 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1693
1694 // The destructor in a virtual table is always a 'deleting'
1695 // destructor, which calls the complete destructor and then uses the
1696 // appropriate operator delete.
1697 if (D->isVirtual())
1698 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1699}
1700
1701void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1702 QualType &ResTy,
1703 FunctionArgList &Params) {
1704 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1705 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1706
1707 // Check if we need a VTT parameter as well.
1708 if (NeedsVTTParameter(CGF.CurGD)) {
1709 ASTContext &Context = getContext();
1710
1711 // FIXME: avoid the fake decl
1712 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1713 QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
1714 QualType T = Context.getPointerType(Q);
1715 auto *VTTDecl = ImplicitParamDecl::Create(
1716 Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1717 T, ImplicitParamKind::CXXVTT);
1718 Params.insert(Params.begin() + 1, VTTDecl);
1719 getStructorImplicitParamDecl(CGF) = VTTDecl;
1720 }
1721}
1722
1723void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1724 // Naked functions have no prolog.
1725 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1726 return;
1727
1728 /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1729 /// adjustments are required, because they are all handled by thunks.
1730 setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1731
1732 /// Initialize the 'vtt' slot if needed.
1733 if (getStructorImplicitParamDecl(CGF)) {
1734 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1735 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1736 }
1737
1738 /// If this is a function that the ABI specifies returns 'this', initialize
1739 /// the return slot to 'this' at the start of the function.
1740 ///
1741 /// Unlike the setting of return types, this is done within the ABI
1742 /// implementation instead of by clients of CGCXXABI because:
1743 /// 1) getThisValue is currently protected
1744 /// 2) in theory, an ABI could implement 'this' returns some other way;
1745 /// HasThisReturn only specifies a contract, not the implementation
1746 if (HasThisReturn(CGF.CurGD))
1747 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1748}
1749
// Compute the implicit arguments (beyond 'this') for a constructor call:
// when the callee needs a VTT, prefix the VTT pointer right after 'this'.
// NOTE(review): the leading parameter line(s) of this signature (supplying
// CGF, D, and Type used below) are elided in this rendering.
CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
    bool ForVirtualBase, bool Delegating) {
  // No VTT required for this constructor variant: nothing to add.
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument. Make sure to
  // correctly reflect its address space, which can differ from generic on
  // some targets.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
  QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS);
  QualType VTTTy = getContext().getPointerType(Q);
  // prefix() places the argument immediately after 'this'.
  return AddedStructorArgs::prefix({{VTT, VTTTy}});
}
1766
// Return the implicit parameter for a destructor call: the VTT pointer
// for the given destructor variant (may be null when no VTT is needed).
// NOTE(review): the leading parameter line of this signature (supplying
// CGF, DD, and Type used below) is elided in this rendering.
llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
    bool ForVirtualBase, bool Delegating) {
  GlobalDecl GD(DD, Type);
  return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
}
1773
// Emit a call to the destructor variant (Type) of DD on the object 'This':
// compute the VTT argument (if any), pick the callee, and delegate to
// CodeGenFunction::EmitCXXDestructorCall.
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  // May be null when this class/variant requires no VTT.
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  // NOTE(review): the declaration of 'Callee' and the -fapple-kext branch
  // body are elided in this rendering; the visible else-branch calls the
  // structor directly.
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
  else
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee, CGF.getAsNaturalPointerTo(This, ThisTy),
                            ThisTy, VTT, VTTTy, nullptr);
}
1794
// Check if any non-inline method has the specified attribute.
template <typename T>
// NOTE(review): the function signature line (taking the record 'RD' used
// below) is elided in this rendering.
  for (const auto *D : RD->noload_decls()) {
    if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
      // Skip inline functions, ones with a body in this declaration, and
      // pure virtuals: only methods that must be defined out-of-line in
      // some other TU are of interest here.
      if (FD->isInlined() || FD->doesThisDeclarationHaveABody() ||
          FD->isPureVirtual())
        continue;
      if (D->hasAttr<T>())
        return true;
    }
  }

  return false;
}
1810
// NOTE(review): the start of this function's signature is elided in this
// rendering; callers invoke it as setVTableSelectiveDLLImportExport(CGM,
// VTable, RD). It derives a DLL import/export storage class for a vtable
// from the DLL attributes of the record's non-inline methods.
                                              llvm::GlobalVariable *VTable,
                                              const CXXRecordDecl *RD) {
  // Respect any storage class already set, and records that carry an
  // explicit dllimport/dllexport attribute themselves.
  if (VTable->getDLLStorageClass() !=
          llvm::GlobalVariable::DefaultStorageClass ||
      RD->hasAttr<DLLImportAttr>() || RD->hasAttr<DLLExportAttr>())
    return;

  if (CGM.getVTables().isVTableExternal(RD)) {
    // Vtable defined elsewhere: import it if any non-inline method is
    // dllimport.
    if (CXXRecordNonInlineHasAttr<DLLImportAttr>(RD))
      VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
  } else if (CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
    // Vtable defined here: export it if any non-inline method is dllexport.
    VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
}
1825
// Emit the vtable (group) definition for RD: build the initializer, set
// linkage/comdat/visibility, and handle related artifacts (fundamental
// RTTI for __cxxabiv1::__fundamental_type_info, type metadata for CFI/WPD,
// and relative-layout aliases).
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  // Already defined; nothing more to do.
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  // Weak vtables go in a comdat so duplicates across TUs are merged.
  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // PS4-family targets derive DLL storage class from method attributes.
  if (CGM.getTarget().hasPS4DLLImportExport())
    setVTableSelectiveDLLImportExport(CGM, VTable, RD);

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  // NOTE(review): one line of this condition is elided in this rendering.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  if (VTContext.isRelativeLayout()) {
    // Relative layout: strip hwasan metadata and, for non-dso_local
    // vtables, create a relative-reference alias.
    CGVT.RemoveHwasanMetadata(VTable);
    if (!VTable->isDSOLocal())
      CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
  }
}
1891
1892bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1893 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1894 if (Vptr.NearestVBase == nullptr)
1895 return false;
1896 return NeedsVTTParameter(CGF.CurGD);
1897}
1898
1899llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1900 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1901 const CXXRecordDecl *NearestVBase) {
1902
1903 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1904 NeedsVTTParameter(CGF.CurGD)) {
1905 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1906 NearestVBase);
1907 }
1908 return getVTableAddressPoint(Base, VTableClass);
1909}
1910
// Compute the constant address point (the location a vptr points at) for
// the given base subobject within VTableClass's vtable group.
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  // NOTE(review): the declaration line for 'AddressPoint' is elided in this
  // rendering.
  const VTableLayout &Layout =
      CGM.getItaniumVTableContext().getVTableLayout(VTableClass);
      Layout.getAddressPoint(Base);
  // GEP indices: {0, vtable-within-group, component-within-vtable}.
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, 0),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // Add inrange attribute to indicate that only the VTableIndex can be
  // accessed.
  unsigned ComponentSize =
      CGM.getDataLayout().getTypeAllocSize(CGM.getVTableComponentType());
  unsigned VTableSize =
      ComponentSize * Layout.getVTableSize(AddressPoint.VTableIndex);
  unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
  // The range is expressed in bytes relative to the address point itself.
  llvm::ConstantRange InRange(llvm::APInt(32, -Offset, true),
                              llvm::APInt(32, VTableSize - Offset, true));
  return llvm::ConstantExpr::getGetElementPtr(
      VTable->getValueType(), VTable, Indices, /*InBounds=*/true, InRange);
}
1940
1941llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1942 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1943 const CXXRecordDecl *NearestVBase) {
1944 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1945 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1946
1947 // Get the secondary vpointer index.
1948 uint64_t VirtualPointerIndex =
1949 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1950
1951 /// Load the VTT.
1952 llvm::Value *VTT = CGF.LoadCXXVTT();
1953 if (VirtualPointerIndex)
1954 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy, VTT,
1955 VirtualPointerIndex);
1956
1957 // And load the address point from the VTT.
1958 return CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,
1959 CGF.getPointerAlign());
1960}
1961
// Get (creating on first use) the global variable for RD's vtable group,
// and queue it for deferred emission.
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  // Cache lookup. Note 'VTable' is a reference into the DenseMap; the
  // newly created global is recorded through it below.
  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  // Mangle the vtable symbol name for RD.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer to global alignment for the vtable. Otherwise we would align
  // them based on the size of the initializer which doesn't make sense as only
  // single values are read.
  LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32 // relative layout stores 32-bit entries
                        : CGM.getTarget().getPointerAlign(AS);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getAsAlign());
  // The vtable's address is not significant; allow merging.
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // PS4-family targets derive DLL storage class from method attributes.
  if (CGM.getTarget().hasPS4DLLImportExport())
    setVTableSelectiveDLLImportExport(CGM, VTable, RD);

  CGM.setGVProperties(VTable, RD);
  return VTable;
}
2000
// Produce a callee for a virtual call to GD on the object 'This': load the
// vptr and fetch the function pointer from the appropriate vtable slot
// (via a CFI type-checked load, an llvm.load.relative call for relative
// layout, or a plain load).
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
  // NOTE(review): the final parameter line of this signature (supplying
  // 'Loc', used below) is elided in this rendering.
  llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  // Load the object's vptr.
  llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: type-checked load at byte offset index * pointer-width.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable, PtrTy,
        VTableIndex *
            CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
            8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout: slots are 4-byte offsets resolved by
      // llvm.load.relative.
      VFuncLoad = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
    } else {
      llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          PtrTy, VTable, VTableIndex, "vfn");
      VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
                                                CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    // NOTE(review): the tail of the MDNode::get argument list is elided in
    // this rendering.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
2054
2055llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
2056 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
2057 Address This, DeleteOrMemberCallExpr E) {
2058 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2059 auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2060 assert((CE != nullptr) ^ (D != nullptr));
2061 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2062 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2063
2064 GlobalDecl GD(Dtor, DtorType);
2065 const CGFunctionInfo *FInfo =
2066 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2067 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2068 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2069
2070 QualType ThisTy;
2071 if (CE) {
2072 ThisTy = CE->getObjectType();
2073 } else {
2074 ThisTy = D->getDestroyedType();
2075 }
2076
2077 CGF.EmitCXXDestructorCall(GD, Callee, This.emitRawPointer(CGF), ThisTy,
2078 nullptr, QualType(), nullptr);
2079 return nullptr;
2080}
2081
2082void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2083 CodeGenVTables &VTables = CGM.getVTables();
2084 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2085 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2086}
2087
2088bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2089 const CXXRecordDecl *RD) const {
2090 // We don't emit available_externally vtables if we are in -fapple-kext mode
2091 // because kext mode does not permit devirtualization.
2092 if (CGM.getLangOpts().AppleKext)
2093 return false;
2094
2095 // If the vtable is hidden then it is not safe to emit an available_externally
2096 // copy of vtable.
2097 if (isVTableHidden(RD))
2098 return false;
2099
2100 if (CGM.getCodeGenOpts().ForceEmitVTables)
2101 return true;
2102
2103 // If we don't have any not emitted inline virtual function then we are safe
2104 // to emit an available_externally copy of vtable.
2105 // FIXME we can still emit a copy of the vtable if we
2106 // can emit definition of the inline functions.
2107 if (hasAnyUnusedVirtualInlineFunction(RD))
2108 return false;
2109
2110 // For a class with virtual bases, we must also be able to speculatively
2111 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2112 // the vtable" and "can emit the VTT". For a base subobject, this means we
2113 // need to be able to emit non-virtual base vtables.
2114 if (RD->getNumVBases()) {
2115 for (const auto &B : RD->bases()) {
2116 auto *BRD = B.getType()->getAsCXXRecordDecl();
2117 assert(BRD && "no class for base specifier");
2118 if (B.isVirtual() || !BRD->isDynamicClass())
2119 continue;
2120 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2121 return false;
2122 }
2123 }
2124
2125 return true;
2126}
2127
// Decide whether RD's complete-object vtable may be emitted speculatively
// (available_externally): the class itself must qualify as a base class,
// and so must every dynamic virtual base (since the VTT references them).
bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
    return false;

  // NOTE(review): the condition line guarding this early exit is elided in
  // this rendering.
    return false;

  // For a complete-object vtable (or more specifically, for the VTT), we need
  // to be able to speculatively emit the vtables of all dynamic virtual bases.
  for (const auto &B : RD->vbases()) {
    auto *BRD = B.getType()->getAsCXXRecordDecl();
    assert(BRD && "no class for base specifier");
    if (!BRD->isDynamicClass())
      continue;
    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
      return false;
  }

  return true;
}
// Shared helper for thunk pointer adjustments: applies a constant
// non-virtual byte offset and/or a virtual offset loaded from the object's
// vtable. For a 'this' adjustment (IsReturnAdjustment == false) the
// non-virtual part is applied before the virtual part; for a return
// adjustment it is applied after.
// NOTE(review): the first line of this signature and several statement
// fragments below are elided in this rendering.
                                       Address InitialPtr,
                                       int64_t NonVirtualAdjustment,
                                       int64_t VirtualAdjustment,
                                       bool IsReturnAdjustment) {
  // Fast path: no adjustment at all.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.emitRawPointer(CGF);

  // Work in terms of i8 so the adjustments are byte offsets.
  Address V = InitialPtr.withElementType(CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  // NOTE(review): the statement applying it is partially elided here.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
                                          CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    // The offset to apply lives in the vtable, VirtualAdjustment bytes from
    // the address point; load the vptr to find it.
    Address VTablePtrPtr = V.withElementType(CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
      // Load the adjustment offset from the vtable as a 32-bit int.
      Offset =
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
    } else {
      llvm::Type *PtrDiffTy =

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getElementType(),
                                              V.emitRawPointer(CGF), Offset);
  } else {
    ResultPtr = V.emitRawPointer(CGF);
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  return ResultPtr;
}
2202
// Apply a thunk's 'this' adjustment (non-virtual first, then virtual).
// NOTE(review): the argument line carrying TA's virtual component is
// elided in this rendering.
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  Address This,
                                                  const ThisAdjustment &TA) {
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               /*IsReturnAdjustment=*/false);
}
2210
// Apply a thunk's return-value adjustment (virtual first, then
// non-virtual).
// NOTE(review): the argument line carrying RA's virtual component is
// elided in this rendering.
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               /*IsReturnAdjustment=*/true);
}
2218
2219void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2220 RValue RV, QualType ResultType) {
2221 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2222 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2223
2224 // Destructor thunks in the ARM ABI have indeterminate results.
2225 llvm::Type *T = CGF.ReturnValue.getElementType();
2226 RValue Undef = RValue::get(llvm::UndefValue::get(T));
2227 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2228}
2229
2230/************************** Array allocation cookies **************************/
2231
2232CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2233 // The array cookie is a size_t; pad that up to the element alignment.
2234 // The cookie is actually right-justified in that space.
2235 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2236 CGM.getContext().getPreferredTypeAlignInChars(elementType));
2237}
2238
2239Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2240 Address NewPtr,
2241 llvm::Value *NumElements,
2242 const CXXNewExpr *expr,
2243 QualType ElementType) {
2244 assert(requiresArrayCookie(expr));
2245
2246 unsigned AS = NewPtr.getAddressSpace();
2247
2248 ASTContext &Ctx = getContext();
2249 CharUnits SizeSize = CGF.getSizeSize();
2250
2251 // The size of the cookie.
2252 CharUnits CookieSize =
2253 std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
2254 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2255
2256 // Compute an offset to the cookie.
2257 Address CookiePtr = NewPtr;
2258 CharUnits CookieOffset = CookieSize - SizeSize;
2259 if (!CookieOffset.isZero())
2260 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2261
2262 // Write the number of elements into the appropriate slot.
2263 Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy);
2264 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2265
2266 // Handle the array cookie specially in ASan.
2267 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2268 (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2269 CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2270 // The store to the CookiePtr does not need to be instrumented.
2271 SI->setNoSanitizeMetadata();
2272 llvm::FunctionType *FTy =
2273 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2274 llvm::FunctionCallee F =
2275 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2276 CGF.Builder.CreateCall(F, NumElementsPtr.emitRawPointer(CGF));
2277 }
2278
2279 // Finally, compute a pointer to the actual data buffer by skipping
2280 // over the cookie completely.
2281 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2282}
2283
2284llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2285 Address allocPtr,
2286 CharUnits cookieSize) {
2287 // The element size is right-justified in the cookie.
2288 Address numElementsPtr = allocPtr;
2289 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2290 if (!numElementsOffset.isZero())
2291 numElementsPtr =
2292 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2293
2294 unsigned AS = allocPtr.getAddressSpace();
2295 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
2296 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2297 return CGF.Builder.CreateLoad(numElementsPtr);
2298 // In asan mode emit a function call instead of a regular load and let the
2299 // run-time deal with it: if the shadow is properly poisoned return the
2300 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2301 // We can't simply ignore this load using nosanitize metadata because
2302 // the metadata may be lost.
2303 llvm::FunctionType *FTy =
2304 llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
2305 llvm::FunctionCallee F =
2306 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2307 return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
2308}
2309
2310CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2311 // ARM says that the cookie is always:
2312 // struct array_cookie {
2313 // std::size_t element_size; // element_size != 0
2314 // std::size_t element_count;
2315 // };
2316 // But the base ABI doesn't give anything an alignment greater than
2317 // 8, so we can dismiss this as typical ABI-author blindness to
2318 // actual language complexity and round up to the element alignment.
2319 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2320 CGM.getContext().getTypeAlignInChars(elementType));
2321}
2322
2323Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2324 Address newPtr,
2325 llvm::Value *numElements,
2326 const CXXNewExpr *expr,
2327 QualType elementType) {
2328 assert(requiresArrayCookie(expr));
2329
2330 // The cookie is always at the start of the buffer.
2331 Address cookie = newPtr;
2332
2333 // The first element is the element size.
2334 cookie = cookie.withElementType(CGF.SizeTy);
2335 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2336 getContext().getTypeSizeInChars(elementType).getQuantity());
2337 CGF.Builder.CreateStore(elementSize, cookie);
2338
2339 // The second element is the element count.
2340 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2341 CGF.Builder.CreateStore(numElements, cookie);
2342
2343 // Finally, compute a pointer to the actual data buffer by skipping
2344 // over the cookie completely.
2345 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2346 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2347}
2348
2349llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2350 Address allocPtr,
2351 CharUnits cookieSize) {
2352 // The number of elements is at offset sizeof(size_t) relative to
2353 // the allocated pointer.
2354 Address numElementsPtr
2355 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2356
2357 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
2358 return CGF.Builder.CreateLoad(numElementsPtr);
2359}
2360
2361/*********************** Static local initialization **************************/
2362
2363static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2364 llvm::PointerType *GuardPtrTy) {
2365 // int __cxa_guard_acquire(__guard *guard_object);
2366 llvm::FunctionType *FTy =
2367 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2368 GuardPtrTy, /*isVarArg=*/false);
2369 return CGM.CreateRuntimeFunction(
2370 FTy, "__cxa_guard_acquire",
2371 llvm::AttributeList::get(CGM.getLLVMContext(),
2372 llvm::AttributeList::FunctionIndex,
2373 llvm::Attribute::NoUnwind));
2374}
2375
2376static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2377 llvm::PointerType *GuardPtrTy) {
2378 // void __cxa_guard_release(__guard *guard_object);
2379 llvm::FunctionType *FTy =
2380 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2381 return CGM.CreateRuntimeFunction(
2382 FTy, "__cxa_guard_release",
2383 llvm::AttributeList::get(CGM.getLLVMContext(),
2384 llvm::AttributeList::FunctionIndex,
2385 llvm::Attribute::NoUnwind));
2386}
2387
2388static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2389 llvm::PointerType *GuardPtrTy) {
2390 // void __cxa_guard_abort(__guard *guard_object);
2391 llvm::FunctionType *FTy =
2392 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2393 return CGM.CreateRuntimeFunction(
2394 FTy, "__cxa_guard_abort",
2395 llvm::AttributeList::get(CGM.getLLVMContext(),
2396 llvm::AttributeList::FunctionIndex,
2397 llvm::Attribute::NoUnwind));
2398}
2399
namespace {
  /// Cleanup that calls __cxa_guard_abort on the given guard variable,
  /// pushed so that an exception escaping a guarded static initialization
  /// releases the guard (allowing a later retry).
  struct CallGuardAbort final : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    // Emit the (nounwind) runtime call along the cleanup path.
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}
2411
2412/// The ARM code here follows the Itanium code closely enough that we
2413/// just special-case it at particular places.
2414void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2415 const VarDecl &D,
2416 llvm::GlobalVariable *var,
2417 bool shouldPerformInit) {
2418 CGBuilderTy &Builder = CGF.Builder;
2419
2420 // Inline variables that weren't instantiated from variable templates have
2421 // partially-ordered initialization within their translation unit.
2422 bool NonTemplateInline =
2423 D.isInline() &&
2425
2426 // We only need to use thread-safe statics for local non-TLS variables and
2427 // inline variables; other global initialization is always single-threaded
2428 // or (through lazy dynamic loading in multiple threads) unsequenced.
2429 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2430 (D.isLocalVarDecl() || NonTemplateInline) &&
2431 !D.getTLSKind();
2432
2433 // If we have a global variable with internal linkage and thread-safe statics
2434 // are disabled, we can just let the guard variable be of type i8.
2435 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2436
2437 llvm::IntegerType *guardTy;
2438 CharUnits guardAlignment;
2439 if (useInt8GuardVariable) {
2440 guardTy = CGF.Int8Ty;
2441 guardAlignment = CharUnits::One();
2442 } else {
2443 // Guard variables are 64 bits in the generic ABI and size width on ARM
2444 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2445 if (UseARMGuardVarABI) {
2446 guardTy = CGF.SizeTy;
2447 guardAlignment = CGF.getSizeAlign();
2448 } else {
2449 guardTy = CGF.Int64Ty;
2450 guardAlignment =
2451 CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
2452 }
2453 }
2454 llvm::PointerType *guardPtrTy = llvm::PointerType::get(
2455 CGF.CGM.getLLVMContext(),
2456 CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
2457
2458 // Create the guard variable if we don't already have it (as we
2459 // might if we're double-emitting this function body).
2460 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2461 if (!guard) {
2462 // Mangle the name for the guard.
2463 SmallString<256> guardName;
2464 {
2465 llvm::raw_svector_ostream out(guardName);
2466 getMangleContext().mangleStaticGuardVariable(&D, out);
2467 }
2468
2469 // Create the guard variable with a zero-initializer.
2470 // Just absorb linkage, visibility and dll storage class from the guarded
2471 // variable.
2472 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2473 false, var->getLinkage(),
2474 llvm::ConstantInt::get(guardTy, 0),
2475 guardName.str());
2476 guard->setDSOLocal(var->isDSOLocal());
2477 guard->setVisibility(var->getVisibility());
2478 guard->setDLLStorageClass(var->getDLLStorageClass());
2479 // If the variable is thread-local, so is its guard variable.
2480 guard->setThreadLocalMode(var->getThreadLocalMode());
2481 guard->setAlignment(guardAlignment.getAsAlign());
2482
2483 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2484 // group as the associated data object." In practice, this doesn't work for
2485 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2486 llvm::Comdat *C = var->getComdat();
2487 if (!D.isLocalVarDecl() && C &&
2488 (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2489 CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2490 guard->setComdat(C);
2491 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2492 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2493 }
2494
2495 CGM.setStaticLocalDeclGuardAddress(&D, guard);
2496 }
2497
2498 Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
2499
2500 // Test whether the variable has completed initialization.
2501 //
2502 // Itanium C++ ABI 3.3.2:
2503 // The following is pseudo-code showing how these functions can be used:
2504 // if (obj_guard.first_byte == 0) {
2505 // if ( __cxa_guard_acquire (&obj_guard) ) {
2506 // try {
2507 // ... initialize the object ...;
2508 // } catch (...) {
2509 // __cxa_guard_abort (&obj_guard);
2510 // throw;
2511 // }
2512 // ... queue object destructor with __cxa_atexit() ...;
2513 // __cxa_guard_release (&obj_guard);
2514 // }
2515 // }
2516 //
2517 // If threadsafe statics are enabled, but we don't have inline atomics, just
2518 // call __cxa_guard_acquire unconditionally. The "inline" check isn't
2519 // actually inline, and the user might not expect calls to __atomic libcalls.
2520
2521 unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
2522 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2523 if (!threadsafe || MaxInlineWidthInBits) {
2524 // Load the first byte of the guard variable.
2525 llvm::LoadInst *LI =
2526 Builder.CreateLoad(guardAddr.withElementType(CGM.Int8Ty));
2527
2528 // Itanium ABI:
2529 // An implementation supporting thread-safety on multiprocessor
2530 // systems must also guarantee that references to the initialized
2531 // object do not occur before the load of the initialization flag.
2532 //
2533 // In LLVM, we do this by marking the load Acquire.
2534 if (threadsafe)
2535 LI->setAtomic(llvm::AtomicOrdering::Acquire);
2536
2537 // For ARM, we should only check the first bit, rather than the entire byte:
2538 //
2539 // ARM C++ ABI 3.2.3.1:
2540 // To support the potential use of initialization guard variables
2541 // as semaphores that are the target of ARM SWP and LDREX/STREX
2542 // synchronizing instructions we define a static initialization
2543 // guard variable to be a 4-byte aligned, 4-byte word with the
2544 // following inline access protocol.
2545 // #define INITIALIZED 1
2546 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2547 // if (__cxa_guard_acquire(&obj_guard))
2548 // ...
2549 // }
2550 //
2551 // and similarly for ARM64:
2552 //
2553 // ARM64 C++ ABI 3.2.2:
2554 // This ABI instead only specifies the value bit 0 of the static guard
2555 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2556 // variable is not initialized and 1 when it is.
2557 llvm::Value *V =
2558 (UseARMGuardVarABI && !useInt8GuardVariable)
2559 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2560 : LI;
2561 llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2562
2563 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2564
2565 // Check if the first byte of the guard variable is zero.
2566 CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2567 CodeGenFunction::GuardKind::VariableGuard, &D);
2568
2569 CGF.EmitBlock(InitCheckBlock);
2570 }
2571
2572 // The semantics of dynamic initialization of variables with static or thread
2573 // storage duration depends on whether they are declared at block-scope. The
2574 // initialization of such variables at block-scope can be aborted with an
2575 // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
2576 // to their initialization has undefined behavior (also per C++20
2577 // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
2578 // lead to termination (per C++20 [except.terminate]p1), and recursive
2579 // references to the variables are governed only by the lifetime rules (per
2580 // C++20 [class.cdtor]p2), which means such references are perfectly fine as
2581 // long as they avoid touching memory. As a result, block-scope variables must
2582 // not be marked as initialized until after initialization completes (unless
2583 // the mark is reverted following an exception), but non-block-scope variables
2584 // must be marked prior to initialization so that recursive accesses during
2585 // initialization do not restart initialization.
2586
2587 // Variables used when coping with thread-safe statics and exceptions.
2588 if (threadsafe) {
2589 // Call __cxa_guard_acquire.
2590 llvm::Value *V
2591 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2592
2593 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2594
2595 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2596 InitBlock, EndBlock);
2597
2598 // Call __cxa_guard_abort along the exceptional edge.
2599 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2600
2601 CGF.EmitBlock(InitBlock);
2602 } else if (!D.isLocalVarDecl()) {
2603 // For non-local variables, store 1 into the first byte of the guard
2604 // variable before the object initialization begins so that references
2605 // to the variable during initialization don't restart initialization.
2606 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2607 guardAddr.withElementType(CGM.Int8Ty));
2608 }
2609
2610 // Emit the initializer and add a global destructor if appropriate.
2611 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2612
2613 if (threadsafe) {
2614 // Pop the guard-abort cleanup if we pushed one.
2615 CGF.PopCleanupBlock();
2616
2617 // Call __cxa_guard_release. This cannot throw.
2618 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2619 guardAddr.emitRawPointer(CGF));
2620 } else if (D.isLocalVarDecl()) {
2621 // For local variables, store 1 into the first byte of the guard variable
2622 // after the object initialization completes so that initialization is
2623 // retried if initialization is interrupted by an exception.
2624 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2625 guardAddr.withElementType(CGM.Int8Ty));
2626 }
2627
2628 CGF.EmitBlock(EndBlock);
2629}
2630
/// Register a global destructor using __cxa_atexit.
///
/// \param dtor  destructor (or destructor stub) to run at exit; assumed to be
///              callable with the default calling convention.
/// \param addr  object address passed back to \p dtor; may be null (see below).
/// \param TLS   if true, register with the thread-local variant instead.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  // Thread-local destructors go through __cxa_thread_atexit (spelled
  // _tlv_atexit on Darwin) rather than __cxa_atexit.
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.
  llvm::Type *dtorTy = CGF.UnqualPtrTy;

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrPtrTy = AddrAS ? llvm::PointerType::get(CGF.getLLVMContext(), AddrAS)
                          : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
    fn->setDoesNotThrow();

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  llvm::Value *args[] = {dtor.getCallee(), addr, handle};
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
2680
/// Create an internal helper function (named \p FnName) whose body the caller
/// fills in with per-priority destructor registration or cleanup code.
static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
                                                   StringRef FnName) {
  // Create a function that registers/unregisters destructors that have the same
  // priority.
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());

  return GlobalInitOrCleanupFn;
}
2691
// Emit one "__GLOBAL_cleanup_<priority>" function per destructor priority.
// Each calls unatexit() for every destructor of that priority and, when
// unatexit reports the handler was still pending (returns 0), runs the
// destructor directly; the cleanup function is then added as a global dtor.
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      // The "end" of one destructor's check doubles as the start of the next
      // unatexit call, hence the name depends on whether more dtors follow.
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}
2747
// Emit one "__GLOBAL_init_<priority>" function per destructor priority that
// registers every destructor of that priority with (__cxa_)atexit, and add
// each such function as a global ctor at the matching priority.
void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      } else {
        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.
        CGF.registerGlobalDtorWithAtExit(Dtor);
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority);
  }

  // Targets using sinit/sterm sections also need the matching per-priority
  // cleanup functions that unregister and run these destructors.
  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}
2787
2788/// Register a global destructor as best as we know how.
2789void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2790 llvm::FunctionCallee dtor,
2791 llvm::Constant *addr) {
2792 if (D.isNoDestroy(CGM.getContext()))
2793 return;
2794
2795 // OpenMP offloading supports C++ constructors and destructors but we do not
2796 // always have 'atexit' available. Instead lower these to use the LLVM global
2797 // destructors which we can handle directly in the runtime. Note that this is
2798 // not strictly 1-to-1 with using `atexit` because we no longer tear down
2799 // globals in reverse order of when they were constructed.
2800 if (!CGM.getLangOpts().hasAtExit() && !D.isStaticLocal())
2801 return CGF.registerGlobalDtorWithLLVM(D, dtor, addr);
2802
2803 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2804 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2805 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2806 // We can always use __cxa_thread_atexit.
2807 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2808 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2809
2810 // In Apple kexts, we want to add a global destructor entry.
2811 // FIXME: shouldn't this be guarded by some variable?
2812 if (CGM.getLangOpts().AppleKext) {
2813 // Generate a global destructor entry.
2814 return CGM.AddCXXDtorEntry(dtor, addr);
2815 }
2816
2817 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2818}
2819
/// Returns true if references to this thread_local variable should go through
/// its thread wrapper even in the defining TU (i.e. the wrapper is an
/// interposable entry point rather than a local convenience).
static bool isThreadWrapperReplaceable(const VarDecl *VD,
                                       CodeGen::CodeGenModule &CGM) {
  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
  // Darwin prefers to have references to thread local variables to go through
  // the thread wrapper instead of directly referencing the backing variable.
  return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
         CGM.getTarget().getTriple().isOSDarwin();
}
2828
/// Get the appropriate linkage for the wrapper function. This is essentially
/// the weak form of the variable's linkage; every translation unit which needs
/// the wrapper emits a copy, and we want the linker to merge them.
static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
  llvm::GlobalValue::LinkageTypes VarLinkage =
      CGM.getLLVMLinkageVarDefinition(VD);

  // For internal linkage variables, we don't need an external or weak wrapper.
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
    return VarLinkage;

  // If the thread wrapper is replaceable, give it appropriate linkage.
  if (isThreadWrapperReplaceable(VD, CGM))
    if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
        !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
      return VarLinkage;
  // Otherwise fall back to weak_odr so the linker merges the per-TU copies.
  return llvm::GlobalValue::WeakODRLinkage;
}
2848
/// Get or create the mangled thread wrapper function for \p VD. The wrapper
/// returns a pointer to the variable (after stripping any reference type);
/// its body is emitted later by EmitThreadLocalInitFuncs via ThreadWrappers.
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper's return type is a pointer to the variable's (non-reference)
  // type.
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  // Record the wrapper so EmitThreadLocalInitFuncs emits its body.
  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}
2897
/// Emit the guarded "__tls_init" function (for ordered initializers) and the
/// bodies of all referenced thread wrapper functions for this TU's
/// thread_local variables.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations get unordered (per-variable) initialization.
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    // __tls_guard is a thread-local byte recording whether __tls_init has
    // already run on the current thread.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper. This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols. However,
    // other TUs will not know whether the initialization routine exists
    // so create an empty, init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, true) &&
        !mayNeedDestruction(VD)) {
      // Init should be null. If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Func),
                                    /*IsThunk=*/false);
      // Create a function body that just returns
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, except if constinit and also neither of class type or of
      // (possibly multi-dimensional) array of class type, thread_local vars
      // will have init routines regardless of whether they are
      // const-initialized. Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existence. This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(InitFnTy, Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Builder.CreateThreadLocalAddress(Var);

    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align);
    }

    Builder.CreateRet(Val);
  }
}
3100
3101LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3102 const VarDecl *VD,
3103 QualType LValType) {
3104 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3105 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3106
3107 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3108 CallVal->setCallingConv(Wrapper->getCallingConv());
3109
3110 LValue LV;
3111 if (VD->getType()->isReferenceType())
3112 LV = CGF.MakeNaturalAlignRawAddrLValue(CallVal, LValType);
3113 else
3114 LV = CGF.MakeRawAddrLValue(CallVal, LValType,
3115 CGF.getContext().getDeclAlign(VD));
3116 // FIXME: need setObjCGCLValueClass?
3117 return LV;
3118}
3119
3120/// Return whether the given global decl needs a VTT parameter, which it does
3121/// if it's a base constructor or destructor with virtual bases.
3122bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3123 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3124
3125 // We don't have any virtual bases, just return early.
3126 if (!MD->getParent()->getNumVBases())
3127 return false;
3128
3129 // Check if we have a base constructor.
3130 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3131 return true;
3132
3133 // Check if we have a base destructor.
3134 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3135 return true;
3136
3137 return false;
3138}
3139
3140namespace {
/// ItaniumRTTIBuilder - Builds the Itanium-ABI RTTI descriptor (type_info
/// object) for a single type, accumulating its fields in order.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM; // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI; // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
3239}
3240
llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
    QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);

  // We know that the mangled name of the type starts at index 4 of the
  // mangled name of the typename, so we can just index into it in order to
  // get the mangled name of the type. (The typename is the type's mangling
  // with a 4-character prefix prepended.)
  llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
                                                            Name.substr(4));
  auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);

  llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, Init->getType(), Linkage, Align.getAsAlign());

  GV->setInitializer(Init);

  return GV;
}
3261
3262llvm::Constant *
3263ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3264 // Mangle the RTTI name.
3265 SmallString<256> Name;
3266 llvm::raw_svector_ostream Out(Name);
3267 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3268
3269 // Look for an existing global.
3270 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3271
3272 if (!GV) {
3273 // Create a new global variable.
3274 // Note for the future: If we would ever like to do deferred emission of
3275 // RTTI, check if emitting vtables opportunistically need any adjustment.
3276
3277 GV = new llvm::GlobalVariable(
3278 CGM.getModule(), CGM.GlobalsInt8PtrTy,
3279 /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
3280 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3281 CGM.setGVProperties(GV, RD);
3282 // Import the typeinfo symbol when all non-inline virtual methods are
3283 // imported.
3284 if (CGM.getTarget().hasPS4DLLImportExport()) {
3285 if (RD && CXXRecordNonInlineHasAttr<DLLImportAttr>(RD)) {
3286 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3287 CGM.setDSOLocal(GV);
3288 }
3289 }
3290 }
3291
3292 return GV;
3293}
3294
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
/// info for that type is defined in the standard library.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
  // Itanium C++ ABI 2.9.2:
  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
  //   the run-time support library. Specifically, the run-time support
  //   library should contain type_info objects for the types X, X* and
  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
  //   long, unsigned long, long long, unsigned long long, float, double,
  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
  //   half-precision floating point types.
  //
  // GCC also emits RTTI for __int128.
  // FIXME: We do not emit RTTI information for decimal types here.

  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
  switch (Ty->getKind()) {
  case BuiltinType::Void:
  case BuiltinType::NullPtr:
  case BuiltinType::Bool:
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
  case BuiltinType::Char_U:
  case BuiltinType::Char_S:
  case BuiltinType::UChar:
  case BuiltinType::SChar:
  case BuiltinType::Short:
  case BuiltinType::UShort:
  case BuiltinType::Int:
  case BuiltinType::UInt:
  case BuiltinType::Long:
  case BuiltinType::ULong:
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
  case BuiltinType::Half:
  case BuiltinType::Float:
  case BuiltinType::Double:
  case BuiltinType::LongDouble:
  case BuiltinType::Float16:
  case BuiltinType::Float128:
  case BuiltinType::Ibm128:
  case BuiltinType::Char8:
  case BuiltinType::Char16:
  case BuiltinType::Char32:
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return true;

  // Target-specific and fixed-point builtin types have no type_info in the
  // runtime library; their RTTI must be emitted here when needed.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
  case BuiltinType::OCLSampler:
  case BuiltinType::OCLEvent:
  case BuiltinType::OCLClkEvent:
  case BuiltinType::OCLQueue:
  case BuiltinType::OCLReserveID:
#define SVE_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
  case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
  case BuiltinType::ShortAccum:
  case BuiltinType::Accum:
  case BuiltinType::LongAccum:
  case BuiltinType::UShortAccum:
  case BuiltinType::UAccum:
  case BuiltinType::ULongAccum:
  case BuiltinType::ShortFract:
  case BuiltinType::Fract:
  case BuiltinType::LongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::UFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatShortAccum:
  case BuiltinType::SatAccum:
  case BuiltinType::SatLongAccum:
  case BuiltinType::SatUShortAccum:
  case BuiltinType::SatUAccum:
  case BuiltinType::SatULongAccum:
  case BuiltinType::SatShortFract:
  case BuiltinType::SatFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::SatUFract:
  case BuiltinType::SatULongFract:
  case BuiltinType::BFloat16:
    return false;

  case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
    llvm_unreachable("asking for RRTI for a placeholder type!");

  case BuiltinType::ObjCId:
  case BuiltinType::ObjCClass:
  case BuiltinType::ObjCSel:
    llvm_unreachable("FIXME: Objective-C types are unsupported!");
  }

  llvm_unreachable("Invalid BuiltinType Kind!");
}
3409
3410static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3411 QualType PointeeTy = PointerTy->getPointeeType();
3412 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3413 if (!BuiltinTy)
3414 return false;
3415
3416 // Check the qualifiers.
3417 Qualifiers Quals = PointeeTy.getQualifiers();
3418 Quals.removeConst();
3419
3420 if (!Quals.empty())
3421 return false;
3422
3423 return TypeInfoIsInStandardLibrary(BuiltinTy);
3424}
3425
/// IsStandardLibraryRTTIDescriptor - Returns whether the type
/// information for the given type exists in the standard library.
  // Type info for builtin types is defined in the standard library.
  if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
    return TypeInfoIsInStandardLibrary(BuiltinTy);

  // Type info for some pointer types to builtin types is defined in the
  // standard library ("X const*" for each fundamental X; see the list above).
  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
    return TypeInfoIsInStandardLibrary(PointerTy);

  // Everything else must be emitted by the compiler.
  return false;
}
3440
/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
/// the given type exists somewhere else, and that we should not emit the type
/// information in this translation unit.  Assumes that it is not a
/// standard-library type.
                                            QualType Ty) {
  ASTContext &Context = CGM.getContext();

  // If RTTI is disabled, assume it might be disabled in the
  // translation unit that defines any potential key function, too.
  if (!Context.getLangOpts().RTTI) return false;

  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
    // An incomplete class has no key function anywhere, so nobody else can
    // be relied on to emit its RTTI.
    if (!RD->hasDefinition())
      return false;

    // Only dynamic classes have their RTTI anchored to a vtable elsewhere.
    if (!RD->isDynamicClass())
      return false;

    // FIXME: this may need to be reconsidered if the key function
    // changes.
    // N.B. We must always emit the RTTI data ourselves if there exists a key
    // function.
    bool IsDLLImport = RD->hasAttr<DLLImportAttr>();

    // Don't import the RTTI but emit it locally.
    if (CGM.getTriple().isWindowsGNUEnvironment())
      return false;

    if (CGM.getVTables().isVTableExternal(RD)) {
      if (CGM.getTarget().hasPS4DLLImportExport())
        return true;

      // On Windows-Itanium a dllimported class still gets locally-emitted
      // RTTI; everywhere else an external vtable implies external RTTI.
      return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
                 ? false
                 : true;
    }
    if (IsDLLImport)
      return true;
  }

  return false;
}
3485
3486/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3487static bool IsIncompleteClassType(const RecordType *RecordTy) {
3488 return !RecordTy->getDecl()->isCompleteDefinition();
3489}
3490
/// ContainsIncompleteClassType - Returns whether the given type contains an
/// incomplete class type. This is true if
///
///   * The given type is an incomplete class type.
///   * The given type is a pointer type whose pointee type contains an
///     incomplete class type.
///   * The given type is a member pointer type whose class is an incomplete
///     class type.
///   * The given type is a member pointer type whose pointee type contains an
///     incomplete class type.
/// i.e. it is an indirect or direct pointer to an incomplete class type.
  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
    if (IsIncompleteClassType(RecordTy))
      return true;
  }

  // Pointers: recurse into the pointee.
  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
    return ContainsIncompleteClassType(PointerTy->getPointeeType());

  if (const MemberPointerType *MemberPointerTy =
      dyn_cast<MemberPointerType>(Ty)) {
    // Check if the class type is incomplete.
    const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
    if (IsIncompleteClassType(ClassType))
      return true;

    // Otherwise recurse into the member's pointee type.
    return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
  }

  return false;
}
3523
// CanUseSingleInheritance - Return whether the given record decl has a "single,
// public, non-virtual base at offset zero (i.e. the derived class is dynamic
// iff the base is)", according to Itanium C++ ABI, 2.95p6b.
// A class satisfying this can use abi::__si_class_type_info instead of the
// more general abi::__vmi_class_type_info.
  // Check the number of bases.
  if (RD->getNumBases() != 1)
    return false;

  // Get the base.

  // Check that the base is not virtual.
  if (Base->isVirtual())
    return false;

  // Check that the base is public.
  if (Base->getAccessSpecifier() != AS_public)
    return false;

  // Check that the class is dynamic iff the base is. (An empty base may sit
  // at offset zero regardless, so it is exempt from the dynamicness check.)
  auto *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
  if (!BaseDecl->isEmpty() &&
      BaseDecl->isDynamicClass() != RD->isDynamicClass())
    return false;

  return true;
}
3552
// BuildVTablePointer - Append, as the first field of the type_info object,
// a pointer into the vtable of the matching abi::*_type_info class for Ty.
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  // Mangled name of the vtable of the __cxxabiv1 class this type maps to.
  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::ArrayParameter:
    llvm_unreachable("Array Parameter types should not get here.");

  case Type::Builtin:
  case Type::BitInt:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    // Incomplete or base-less classes use the plain __class_type_info;
    // a single public non-virtual base allows __si_class_type_info;
    // everything else needs the general __vmi_class_type_info.
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    [[fallthrough]];

  case Type::ObjCInterface:
    // Interfaces with a superclass look like single inheritance.
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
    VTable = CGM.getModule().getNamedAlias(VTableName);
  if (!VTable) {
    // Declare the vtable as an opaque zero-length array of pointers; only
    // its address (plus offset) matters here.
    llvm::Type *Ty = llvm::ArrayType::get(CGM.GlobalsInt8PtrTy, 0);
    VTable = CGM.getModule().getOrInsertGlobal(VTableName, Ty);
  }

  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =

  // The vtable address point is 2.
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
    VTable =
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
  } else {
    // Classic layout: skip two pointer-sized slots (offset-to-top and the
    // RTTI pointer) to reach the address point.
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.GlobalsInt8PtrTy,
                                                          VTable, Two);
  }

  Fields.push_back(VTable);
}
3699
/// Return the linkage that the type info and type info name constants
/// should have for the given type.
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
                                                             QualType Ty) {
  // Itanium C++ ABI 2.9.5p7:
  //   In addition, it and all of the intermediate abi::__pointer_type_info
  //   structs in the chain down to the abi::__class_type_info for the
  //   incomplete class type must be prevented from resolving to the
  //   corresponding type_info structs for the complete class type, possibly
  //   by making them local static objects. Finally, a dummy class RTTI is
  //   generated for the incomplete type that will not resolve to the final
  //   complete class RTTI (because the latter need not exist), possibly by
  //   making it a local static object.
    return llvm::GlobalValue::InternalLinkage;

  switch (Ty->getLinkage()) {
  case Linkage::Invalid:
    llvm_unreachable("Linkage hasn't been computed!");

  case Linkage::None:
  case Linkage::Internal:
    // Non-external types get TU-local RTTI.
    return llvm::GlobalValue::InternalLinkage;

  case Linkage::Module:
  case Linkage::External:
    // RTTI is not enabled, which means that this type info struct is going
    // to be used for exception handling. Give it linkonce_odr linkage.
    if (!CGM.getLangOpts().RTTI)
      return llvm::GlobalValue::LinkOnceODRLinkage;

    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
      if (RD->hasAttr<WeakAttr>())
        return llvm::GlobalValue::WeakODRLinkage;
      if (CGM.getTriple().isWindowsItaniumEnvironment())
        if (RD->hasAttr<DLLImportAttr>() &&
          return llvm::GlobalValue::ExternalLinkage;
      // MinGW always uses LinkOnceODRLinkage for type info.
      // Otherwise a dynamic class's RTTI follows its vtable's linkage so
      // the two are emitted together.
      if (RD->isDynamicClass() &&
          !CGM.getContext()
               .getTargetInfo()
               .getTriple()
               .isWindowsGNUEnvironment())
        return CGM.getVTableLinkage(RD);
    }

    return llvm::GlobalValue::LinkOnceODRLinkage;
  }

  llvm_unreachable("Invalid linkage!");
}
3755
// BuildTypeInfo - Build (or reuse) the type_info object for Ty, computing
// its linkage, visibility and DLL storage class, then delegating to the
// four-argument overload to emit the fields.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
  // We want to operate on the canonical type.
  Ty = Ty.getCanonicalType();

  // Check if we've already emitted an RTTI descriptor for this type, keyed
  // by its mangled _ZTI name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
  if (OldGV && !OldGV->isDeclaration()) {
    assert(!OldGV->hasAvailableExternallyLinkage() &&
           "available_externally typeinfos not yet implemented");

    return OldGV;
  }

  // Check if there is already an external RTTI descriptor for this type.
    return GetAddrOfExternalRTTIDescriptor(Ty);

  // Emit the standard library with external linkage.
  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);

  // Give the type_info object and name the formal visibility of the
  // type itself.
  llvm::GlobalValue::VisibilityTypes llvmVisibility;
  if (llvm::GlobalValue::isLocalLinkage(Linkage))
    // If the linkage is local, only default visibility makes sense.
    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
  else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
           ItaniumCXXABI::RUK_NonUniqueHidden)
    // Non-unique RTTI is demoted to hidden; comparisons fall back to the
    // type_info name string (see the sign-bit flag in the 4-arg overload).
    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
  else
    llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());

  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      llvm::GlobalValue::DefaultStorageClass;
  if (auto RD = Ty->getAsCXXRecordDecl()) {
    if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
         RD->hasAttr<DLLExportAttr>()) ||
         !llvm::GlobalValue::isLocalLinkage(Linkage) &&
         llvmVisibility == llvm::GlobalValue::DefaultVisibility))
      DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
  }
  return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
}
3805
// BuildTypeInfo (worker overload) - Emit the type_info global for Ty with
// the given linkage, visibility and DLL storage class. Populates Fields
// with the vtable pointer, the name pointer, and any per-kind extra data,
// then materializes the global (replacing a forward declaration if any).
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
    QualType Ty,
    llvm::GlobalVariable::LinkageTypes Linkage,
    llvm::GlobalValue::VisibilityTypes Visibility,
    llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Add the vtable pointer.
  BuildVTablePointer(cast<Type>(Ty));

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers.  This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.GlobalsInt8PtrTy);
  } else {
    TypeNameField = TypeName;
  }
  Fields.push_back(TypeNameField);

  // Append the kind-specific trailing fields, mirroring the vtable choice
  // made in BuildVTablePointer.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    //   abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    break;

  case Type::BitInt:
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::ArrayParameter:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;
  }

  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
  llvm::Module &M = CGM.getModule();
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, Init->getType(),
                               /*isConstant=*/true, Linkage, Init, Name);

  // Export the typeinfo in the same circumstances as the vtable is exported.
  auto GVDLLStorageClass = DLLStorageClass;
  if (CGM.getTarget().hasPS4DLLImportExport() &&
      GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
    if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
      if (RD->hasAttr<DLLExportAttr>() ||
          CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
        GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
    }
  }

  // If there's already an old global variable, replace it with the new one.
  if (OldGV) {
    GV->takeName(OldGV);
    OldGV->replaceAllUsesWith(GV);
    OldGV->eraseFromParent();
  }

  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(GV->getName()));

  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(GVDLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return GV;
}
3993
3994/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3995/// for the given Objective-C object type.
3996void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3997 // Drop qualifiers.
3998 const Type *T = OT->getBaseType().getTypePtr();
3999 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
4000
4001 // The builtin types are abi::__class_type_infos and don't require
4002 // extra fields.
4003 if (isa<BuiltinType>(T)) return;
4004
4005 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
4006 ObjCInterfaceDecl *Super = Class->getSuperClass();
4007
4008 // Root classes are also __class_type_info.
4009 if (!Super) return;
4010
4011 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
4012
4013 // Everything else is single inheritance.
4014 llvm::Constant *BaseTypeInfo =
4015 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
4016 Fields.push_back(BaseTypeInfo);
4017}
4018
4019/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
4020/// inheritance, according to the Itanium C++ ABI, 2.95p6b.
4021void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
4022 // Itanium C++ ABI 2.9.5p6b:
4023 // It adds to abi::__class_type_info a single member pointing to the
4024 // type_info structure for the base type,
4025 llvm::Constant *BaseTypeInfo =
4026 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
4027 Fields.push_back(BaseTypeInfo);
4028}
4029
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy. Used by ComputeVMIClassTypeInfoFlags to detect
  /// repeated and diamond-shaped inheritance.
  struct SeenBases {
  };
}
4038
/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
/// abi::__vmi_class_type_info for one base specifier, recursing through its
/// own (direct and indirect) bases and recording them in \p Bases.
///
                                             SeenBases &Bases) {

  unsigned Flags = 0;

  auto *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());

  if (Base->isVirtual()) {
    // Mark the virtual base as seen.
    if (!Bases.VirtualBases.insert(BaseDecl).second) {
      // If this virtual base has been seen before, then the class is diamond
      // shaped.
      Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
    } else {
      // Seen before as a non-virtual base: repeated, but not a diamond.
      if (Bases.NonVirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  } else {
    // Mark the non-virtual base as seen.
    if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
      // If this non-virtual base has been seen before, then the class has non-
      // diamond shaped repeated inheritance.
      Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    } else {
      // Seen before as a virtual base: also a non-diamond repeat.
      if (Bases.VirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  }

  // Walk all bases.
  for (const auto &I : BaseDecl->bases())
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);

  return Flags;
}
4078
4080 unsigned Flags = 0;
4081 SeenBases Bases;
4082
4083 // Walk all bases.
4084 for (const auto &I : RD->bases())
4085 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4086
4087 return Flags;
4088}
4089
/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
/// classes with bases that do not satisfy the abi::__si_class_type_info
/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
  llvm::Type *UnsignedIntLTy =

  // Itanium C++ ABI 2.9.5p6c:
  //   __flags is a word with flags describing details about the class
  //   structure, which may be referenced by using the __flags_masks
  //   enumeration. These flags refer to both direct and indirect bases.
  unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p6c:
  //   __base_count is a word with the number of direct proper base class
  //   descriptions that follow.
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));

  if (!RD->getNumBases())
    return;

  // Now add the base class descriptions.

  // Itanium C++ ABI 2.9.5p6c:
  //   __base_info[] is an array of base class descriptions -- one for every
  //   direct proper base. Each description is of the type:
  //
  //   struct abi::__base_class_type_info {
  //   public:
  //     const __class_type_info *__base_type;
  //     long __offset_flags;
  //
  //     enum __offset_flags_masks {
  //       __virtual_mask = 0x1,
  //       __public_mask = 0x2,
  //       __offset_shift = 8
  //     };
  //   };

  // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
  // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
  // LLP64 platforms.
  // FIXME: Consider updating libc++abi to match, and extend this logic to all
  // LLP64 platforms.
  QualType OffsetFlagsTy = CGM.getContext().LongTy;
  const TargetInfo &TI = CGM.getContext().getTargetInfo();
  if (TI.getTriple().isOSCygMing() &&
    OffsetFlagsTy = CGM.getContext().LongLongTy;
  llvm::Type *OffsetFlagsLTy =
      CGM.getTypes().ConvertType(OffsetFlagsTy);

  for (const auto &Base : RD->bases()) {
    // The __base_type member points to the RTTI for the base type.
    Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));

    auto *BaseDecl =
        cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());

    int64_t OffsetFlags = 0;

    // All but the lower 8 bits of __offset_flags are a signed offset.
    // For a non-virtual base, this is the offset in the object of the base
    // subobject. For a virtual base, this is the offset in the virtual table of
    // the virtual base offset for the virtual base referenced (negative).
    CharUnits Offset;
    if (Base.isVirtual())
      Offset =
    else {
      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
      Offset = Layout.getBaseClassOffset(BaseDecl);
    };

    OffsetFlags = uint64_t(Offset.getQuantity()) << 8;

    // The low-order byte of __offset_flags contains flags, as given by the
    // masks from the enumeration __offset_flags_masks.
    if (Base.isVirtual())
      OffsetFlags |= BCTI_Virtual;
    if (Base.getAccessSpecifier() == AS_public)
      OffsetFlags |= BCTI_Public;

    Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
  }
}
4177
/// Compute the flags for a __pbase_type_info, and remove the corresponding
/// pieces from \p Type. On return, \p Type has its cv-qualifiers stripped
/// (and, for function types, the exception specification handled), so the
/// caller builds the __pointee descriptor from the unqualified type.
  unsigned Flags = 0;

  // Map the pointee's cv-qualifiers onto the __flags word.
  if (Type.isConstQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Const;
  if (Type.isVolatileQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Volatile;
  if (Type.isRestrictQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Restrict;
  Type = Type.getUnqualifiedType();

  // Itanium C++ ABI 2.9.5p7:
  //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
  //   incomplete class type, the incomplete target type flag is set.
    Flags |= ItaniumRTTIBuilder::PTI_Incomplete;

  if (auto *Proto = Type->getAs<FunctionProtoType>()) {
    if (Proto->isNothrow()) {
      Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
    }
  }

  return Flags;
}
4206
/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
/// used for pointer types. Appends the __flags word and the __pointee
/// type_info pointer to Fields.
void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
  // Itanium C++ ABI 2.9.5p7:
  //   __flags is a flag word describing the cv-qualification and other
  //   attributes of the type pointed to
  // Note: extractPBaseFlags also strips the qualifiers from PointeeTy.
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);

  llvm::Type *UnsignedIntLTy =
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p7:
  //  __pointee is a pointer to the std::type_info derivation for the
  //  unqualified type being pointed to.
  llvm::Constant *PointeeTypeInfo =
    ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
  Fields.push_back(PointeeTypeInfo);
}
4226
/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
/// struct, used for member pointer types. Appends __flags, __pointee, and
/// __context to Fields.
void
ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
  QualType PointeeTy = Ty->getPointeeType();

  // Itanium C++ ABI 2.9.5p7:
  //   __flags is a flag word describing the cv-qualification and other
  //   attributes of the type pointed to.
  // Note: extractPBaseFlags also strips the qualifiers from PointeeTy.
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);

  // An incomplete containing class gets its own flag, distinct from an
  // incomplete pointee.
  const RecordType *ClassType = cast<RecordType>(Ty->getClass());
  if (IsIncompleteClassType(ClassType))
    Flags |= PTI_ContainingClassIncomplete;

  llvm::Type *UnsignedIntLTy =
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p7:
  //   __pointee is a pointer to the std::type_info derivation for the
  //   unqualified type being pointed to.
  llvm::Constant *PointeeTypeInfo =
    ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
  Fields.push_back(PointeeTypeInfo);

  // Itanium C++ ABI 2.9.5p9:
  //   __context is a pointer to an abi::__class_type_info corresponding to the
  //   class type containing the member pointed to
  //   (e.g., the "A" in "int A::*").
  Fields.push_back(
    ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
}
4260
4261llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4262 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4263}
4264
// EmitFundamentalRTTIDescriptors - Emit type_info objects for every
// fundamental type, plus "X*" and "X const*" for each, with external
// linkage. RD supplies the visibility/DLL-export context.
void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
  // Types added here must also be added to TypeInfoIsInStandardLibrary.
  QualType FundamentalTypes[] = {
      getContext().VoidTy,             getContext().NullPtrTy,
      getContext().BoolTy,             getContext().WCharTy,
      getContext().CharTy,             getContext().UnsignedCharTy,
      getContext().SignedCharTy,       getContext().ShortTy,
      getContext().UnsignedShortTy,    getContext().IntTy,
      getContext().UnsignedIntTy,      getContext().LongTy,
      getContext().UnsignedLongTy,     getContext().LongLongTy,
      getContext().UnsignedLongLongTy, getContext().Int128Ty,
      getContext().UnsignedInt128Ty,   getContext().HalfTy,
      getContext().FloatTy,            getContext().DoubleTy,
      getContext().LongDoubleTy,       getContext().Float128Ty,
      getContext().Char8Ty,            getContext().Char16Ty,
      getContext().Char32Ty
  };
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
          ? llvm::GlobalValue::DLLExportStorageClass
          : llvm::GlobalValue::DefaultStorageClass;
  llvm::GlobalValue::VisibilityTypes Visibility =
  for (const QualType &FundamentalType : FundamentalTypes) {
    QualType PointerType = getContext().getPointerType(FundamentalType);
    QualType PointerTypeConst = getContext().getPointerType(
        FundamentalType.withConst());
    // Emit the type itself, a pointer to it, and a pointer-to-const form.
    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
      ItaniumRTTIBuilder(*this).BuildTypeInfo(
          Type, llvm::GlobalValue::ExternalLinkage,
          Visibility, DLLStorageClass);
  }
}
4298
4299/// What sort of uniqueness rules should we use for the RTTI for the
4300/// given type?
4301ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4302 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4303 if (shouldRTTIBeUnique())
4304 return RUK_Unique;
4305
4306 // It's only necessary for linkonce_odr or weak_odr linkage.
4307 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4308 Linkage != llvm::GlobalValue::WeakODRLinkage)
4309 return RUK_Unique;
4310
4311 // It's only necessary with default visibility.
4312 if (CanTy->getVisibility() != DefaultVisibility)
4313 return RUK_Unique;
4314
4315 // If we're not required to publish this symbol, hide it.
4316 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4317 return RUK_NonUniqueHidden;
4318
4319 // If we're required to publish this symbol, as we might be under an
4320 // explicit instantiation, leave it with default visibility but
4321 // enable string-comparisons.
4322 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4323 return RUK_NonUniqueVisible;
4324}
4325
4326// Find out how to codegen the complete destructor and constructor
// Strategies for emitting a complete-object structor relative to its
// base-object variant:
//   Emit   - emit a fully separate definition.
//   RAUW   - emit nothing; redirect uses of the complete symbol to the base.
//   Alias  - emit the complete structor as an IR alias of the base one.
//   COMDAT - emit a definition placed in a shared (C5/D5) COMDAT group.
4327namespace {
4328enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4329}
/// Decide how the complete-object variant of the structor MD should be
/// emitted relative to its base-object variant (see StructorCodegen).
4330static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4331                                       const CXXMethodDecl *MD) {
  // Folding complete into base is an optimization; only do it when requested.
4332  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4333    return StructorCodegen::Emit;
4334
4335  // The complete and base structors are not equivalent if there are any virtual
4336  // bases, so emit separate functions.
4337  if (MD->getParent()->getNumVBases())
4338    return StructorCodegen::Emit;
4339
  // Determine the linkage the complete-object variant would get.
4341  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4343  } else {
4344    const auto *CD = cast<CXXConstructorDecl>(MD);
4346  }
4347  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4348
  // Discardable definitions do not need their own symbol at all: just
  // replace uses of the complete variant with the base variant.
4349  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4350    return StructorCodegen::RAUW;
4351
4352  // FIXME: Should we allow available_externally aliases?
4353  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4354    return StructorCodegen::RAUW;
4355
4356  if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4357    // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4358    if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4359        CGM.getTarget().getTriple().isOSBinFormatWasm())
4360      return StructorCodegen::COMDAT;
4361    return StructorCodegen::Emit;
4362  }
4363
4364  return StructorCodegen::Alias;
4365}
4366
// Emit AliasDecl (a complete-object structor) as an IR alias of TargetDecl
// (its base-object equivalent), taking over any pre-existing declaration
// with that mangled name.
4369                                           GlobalDecl TargetDecl) {
4370  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4371
4372  StringRef MangledName = CGM.getMangledName(AliasDecl);
4373  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  // If a definition already exists under this name, there is nothing to do.
4374  if (Entry && !Entry->isDeclaration())
4375    return;
4376
4377  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4378
4379  // Create the alias with no name.
4380  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4381
4382  // Constructors and destructors are always unnamed_addr.
4383  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4384
4385  // Switch any previous uses to the alias.
4386  if (Entry) {
4387    assert(Entry->getType() == Aliasee->getType() &&
4388           "declaration exists with different type");
4389    Alias->takeName(Entry);
4390    Entry->replaceAllUsesWith(Alias);
4391    Entry->eraseFromParent();
4392  } else {
4393    Alias->setName(MangledName);
4394  }
4395
4396  // Finally, set up the alias with its proper name and attributes.
4397  CGM.SetCommonAttributes(AliasDecl, Alias);
4398}
4399
/// Emit the constructor or destructor variant identified by GD, possibly
/// folding a complete-object variant into its base-object variant as an
/// alias, an RAUW replacement, or a shared COMDAT (see getCodegenToUse).
4400void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4401  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4402  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4403  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4404
4405  StructorCodegen CGType = getCodegenToUse(CGM, MD);
4406
  // Complete-object variants may be foldable into their base variants.
4407  if (CD ? GD.getCtorType() == Ctor_Complete
4408         : GD.getDtorType() == Dtor_Complete) {
4409    GlobalDecl BaseDecl;
4410    if (CD)
4411      BaseDecl = GD.getWithCtorType(Ctor_Base);
4412    else
4413      BaseDecl = GD.getWithDtorType(Dtor_Base);
4414
4415    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4416      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4417      return;
4418    }
4419
4420    if (CGType == StructorCodegen::RAUW) {
4421      StringRef MangledName = CGM.getMangledName(GD);
4422      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4423      CGM.addReplacement(MangledName, Aliasee);
4424      return;
4425    }
4426  }
4427
4428  // The base destructor is equivalent to the base destructor of its
4429  // base class if there is exactly one non-virtual base class with a
4430  // non-trivial destructor, there are no fields with a non-trivial
4431  // destructor, and the body of the destructor is trivial.
4432  if (DD && GD.getDtorType() == Dtor_Base &&
4433      CGType != StructorCodegen::COMDAT &&
4435    return;
4436
4437  // FIXME: The deleting destructor is equivalent to the selected operator
4438  // delete if:
4439  //  * either the delete is a destroying operator delete or the destructor
4440  //    would be trivial if it weren't virtual,
4441  //  * the conversion from the 'this' parameter to the first parameter of the
4442  //    destructor is equivalent to a bitcast,
4443  //  * the destructor does not have an implicit "this" return, and
4444  //  * the operator delete has the same calling convention and IR function type
4445  //    as the destructor.
4446  // In such cases we should try to emit the deleting dtor as an alias to the
4447  // selected 'operator delete'.
4448
  // Otherwise, actually generate a definition for this variant.
4449  llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4450
4451  if (CGType == StructorCodegen::COMDAT) {
    // Place the definition in the C5/D5 COMDAT group so the linker can
    // deduplicate the structor variants.
4452    SmallString<256> Buffer;
4453    llvm::raw_svector_ostream Out(Buffer);
4454    if (DD)
4455      getMangleContext().mangleCXXDtorComdat(DD, Out);
4456    else
4457      getMangleContext().mangleCXXCtorComdat(CD, Out);
4458    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4459    Fn->setComdat(C);
4460  } else {
4461    CGM.maybeSetTrivialComdat(*MD, *Fn);
4462  }
4463}
4464
4465static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4466 // void *__cxa_begin_catch(void*);
4467 llvm::FunctionType *FTy = llvm::FunctionType::get(
4468 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4469
4470 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4471}
4472
4473static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4474 // void __cxa_end_catch();
4475 llvm::FunctionType *FTy =
4476 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4477
4478 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4479}
4480
4481static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4482 // void *__cxa_get_exception_ptr(void*);
4483 llvm::FunctionType *FTy = llvm::FunctionType::get(
4484 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4485
4486 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4487}
4488
4489namespace {
4490  /// A cleanup to call __cxa_end_catch. In many cases, the caught
4491  /// exception type lets us state definitively that the thrown exception
4492  /// type does not have a destructor. In particular:
4493  ///   - Catch-alls tell us nothing, so we have to conservatively
4494  ///     assume that the thrown exception might have a destructor.
4495  ///   - Catches by reference behave according to their base types.
4496  ///   - Catches of non-record types will only trigger for exceptions
4497  ///     of non-record types, which never have destructors.
4498  ///   - Catches of record types can trigger for arbitrary subclasses
4499  ///     of the caught type, so we have to assume the actual thrown
4500  ///     exception type might have a throwing destructor, even if the
4501  ///     caught type's destructor is trivial or nothrow.
4502  struct CallEndCatch final : EHScopeStack::Cleanup {
4503    CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
    // Whether the __cxa_end_catch call may throw (see class comment above).
4504    bool MightThrow;
4505
4506    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Non-throwing case: the call can be emitted without EH edges.
4507      if (!MightThrow) {
4509        return;
4510      }
4511
4513    }
4514  };
4515}
4516
4517/// Emits a call to __cxa_begin_catch and enters a cleanup to call
4518/// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume
4519/// that the exception object's dtor is nothrow, therefore the __cxa_end_catch
4520/// call can be marked as nounwind even if EndMightThrow is true.
4521///
4522/// \param EndMightThrow - true if __cxa_end_catch might throw
/// \returns the adjusted exception object pointer produced by
///          __cxa_begin_catch
4523static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4524                                   llvm::Value *Exn,
4525                                   bool EndMightThrow) {
4526  llvm::CallInst *call =
4528
  // Register the matching __cxa_end_catch cleanup. AssumeNothrowExceptionDtor
  // downgrades a potentially-throwing end-catch to a non-throwing one.
4529  CGF.EHStack.pushCleanup<CallEndCatch>(
4531      EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor);
4532
4533  return call;
4534}
4535
4536/// A "special initializer" callback for initializing a catch
4537/// parameter during catch initialization.
///
/// Initializes the catch variable at ParamAddr from the in-flight exception,
/// handling catch-by-reference, scalar/complex catches, and aggregate catches
/// (with or without a non-trivial copy constructor) separately.
4539                           const VarDecl &CatchParam,
4540                           Address ParamAddr,
4542  // Load the exception from where the landing pad saved it.
4543  llvm::Value *Exn = CGF.getExceptionFromSlot();
4544
4545  CanQualType CatchType =
4546      CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4547  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4548
4549  // If we're catching by reference, we can just cast the object
4550  // pointer to the appropriate pointer.
4551  if (isa<ReferenceType>(CatchType)) {
4552    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    // Only record types can have throwing destructors (see CallEndCatch).
4553    bool EndCatchMightThrow = CaughtType->isRecordType();
4554
4555    // __cxa_begin_catch returns the adjusted object pointer.
4556    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4557
4558    // We have no way to tell the personality function that we're
4559    // catching by reference, so if we're catching a pointer,
4560    // __cxa_begin_catch will actually return that pointer by value.
4561    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4562      QualType PointeeType = PT->getPointeeType();
4563
4564      // When catching by reference, generally we should just ignore
4565      // this by-value pointer and use the exception object instead.
4566      if (!PointeeType->isRecordType()) {
4567
4568        // Exn points to the struct _Unwind_Exception header, which
4569        // we have to skip past in order to reach the exception data.
4570        unsigned HeaderSize =
4572        AdjustedExn =
4573            CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
4574
4575        // However, if we're catching a pointer-to-record type that won't
4576        // work, because the personality function might have adjusted
4577        // the pointer. There's actually no way for us to fully satisfy
4578        // the language/ABI contract here: we can't use Exn because it
4579        // might have the wrong adjustment, but we can't use the by-value
4580        // pointer because it's off by a level of abstraction.
4581        //
4582        // The current solution is to dump the adjusted pointer into an
4583        // alloca, which breaks language semantics (because changing the
4584        // pointer doesn't change the exception) but at least works.
4585        // The better solution would be to filter out non-exact matches
4586        // and rethrow them, but this is tricky because the rethrow
4587        // really needs to be catchable by other sites at this landing
4588        // pad. The best solution is to fix the personality function.
4589      } else {
4590        // Pull the pointer for the reference type off.
4591        llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);
4592
4593        // Create the temporary and write the adjusted pointer into it.
4594        Address ExnPtrTmp =
4595          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4596        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4597        CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4598
4599        // Bind the reference to the temporary.
4600        AdjustedExn = ExnPtrTmp.emitRawPointer(CGF);
4601      }
4602    }
4603
    // Store the (possibly adjusted) pointer as the reference's value.
4604    llvm::Value *ExnCast =
4605        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4606    CGF.Builder.CreateStore(ExnCast, ParamAddr);
4607    return;
4608  }
4609
4610  // Scalars and complexes.
4611  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4612  if (TEK != TEK_Aggregate) {
4613    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4614
4615    // If the catch type is a pointer type, __cxa_begin_catch returns
4616    // the pointer by value.
4617    if (CatchType->hasPointerRepresentation()) {
4618      llvm::Value *CastExn =
4619          CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4620
      // ARC ownership of the caught pointer decides how it is stored.
4621      switch (CatchType.getQualifiers().getObjCLifetime()) {
4623        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4624        [[fallthrough]];
4625
4629        CGF.Builder.CreateStore(CastExn, ParamAddr);
4630        return;
4631
4633        CGF.EmitARCInitWeak(ParamAddr, CastExn);
4634        return;
4635      }
4636      llvm_unreachable("bad ownership qualifier!");
4637    }
4638
4639    // Otherwise, it returns a pointer into the exception object.
4640
4641    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(AdjustedExn, CatchType);
4642    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4643    switch (TEK) {
4644    case TEK_Complex:
4645      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4646                             /*init*/ true);
4647      return;
4648    case TEK_Scalar: {
4649      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4650      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4651      return;
4652    }
4653    case TEK_Aggregate:
4654      llvm_unreachable("evaluation kind filtered out!");
4655    }
4656    llvm_unreachable("bad evaluation kind");
4657  }
4658
  // Aggregates: catching by value requires copying the exception object
  // into the catch variable.
4659  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4660  auto catchRD = CatchType->getAsCXXRecordDecl();
4661  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4662
4663  llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
4664
4665  // Check for a copy expression. If we don't have a copy expression,
4666  // that means a trivial copy is okay.
4667  const Expr *copyExpr = CatchParam.getInit();
4668  if (!copyExpr) {
4669    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4670    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4671                        LLVMCatchTy, caughtExnAlignment);
4672    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4673    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4674    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4675    return;
4676  }
4677
4678  // We have to call __cxa_get_exception_ptr to get the adjusted
4679  // pointer before copying.
4680  llvm::CallInst *rawAdjustedExn =
4682
4683  // Cast that to the appropriate type.
4684  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4685                      LLVMCatchTy, caughtExnAlignment);
4686
4687  // The copy expression is defined in terms of an OpaqueValueExpr.
4688  // Find it and map it to the adjusted expression.
4689  CodeGenFunction::OpaqueValueMapping
4690    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4691           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4692
4693  // Call the copy ctor in a terminate scope.
4694  CGF.EHStack.pushTerminate();
4695
4696  // Perform the copy construction.
4697  CGF.EmitAggExpr(copyExpr,
4698                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4703
4704  // Leave the terminate scope.
4705  CGF.EHStack.popTerminate();
4706
4707  // Undo the opaque value mapping.
4708  opaque.pop();
4709
4710  // Finally we can call __cxa_begin_catch.
4711  CallBeginCatch(CGF, Exn, true);
4712}
4713
4714/// Begins a catch statement by initializing the catch variable and
4715/// calling __cxa_begin_catch.
4716void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4717 const CXXCatchStmt *S) {
4718 // We have to be very careful with the ordering of cleanups here:
4719 // C++ [except.throw]p4:
4720 // The destruction [of the exception temporary] occurs
4721 // immediately after the destruction of the object declared in
4722 // the exception-declaration in the handler.
4723 //
4724 // So the precise ordering is:
4725 // 1. Construct catch variable.
4726 // 2. __cxa_begin_catch
4727 // 3. Enter __cxa_end_catch cleanup
4728 // 4. Enter dtor cleanup
4729 //
4730 // We do this by using a slightly abnormal initialization process.
4731 // Delegation sequence:
4732 // - ExitCXXTryStmt opens a RunCleanupsScope
4733 // - EmitAutoVarAlloca creates the variable and debug info
4734 // - InitCatchParam initializes the variable from the exception
4735 // - CallBeginCatch calls __cxa_begin_catch
4736 // - CallBeginCatch enters the __cxa_end_catch cleanup
4737 // - EmitAutoVarCleanups enters the variable destructor cleanup
4738 // - EmitCXXTryStmt emits the code for the catch body
4739 // - EmitCXXTryStmt close the RunCleanupsScope
4740
4741 VarDecl *CatchParam = S->getExceptionDecl();
4742 if (!CatchParam) {
4743 llvm::Value *Exn = CGF.getExceptionFromSlot();
4744 CallBeginCatch(CGF, Exn, true);
4745 return;
4746 }
4747
4748 // Emit the local.
4749 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4750 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4751 CGF.EmitAutoVarCleanups(var);
4752}
4753
4754/// Get or define the following function:
4755///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
4756/// This code is used only in C++.
4757static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4758  ASTContext &C = CGM.getContext();
4760      C.VoidTy, {C.getPointerType(C.CharTy)});
4761  llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI);
4762  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4763      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4764  llvm::Function *fn =
4765      cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
  // Define the body only the first time the function is created.
4766  if (fn->empty()) {
4767    CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false);
4769    fn->setDoesNotThrow();
4770    fn->setDoesNotReturn();
4771
4772    // What we really want is to massively penalize inlining without
4773    // forbidding it completely. The difference between that and
4774    // 'noinline' is negligible.
4775    fn->addFnAttr(llvm::Attribute::NoInline);
4776
4777    // Allow this function to be shared across translation units, but
4778    // we don't want it to turn into an exported symbol.
4779    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4780    fn->setVisibility(llvm::Function::HiddenVisibility);
4781    if (CGM.supportsCOMDAT())
4782      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4783
4784    // Set up the function.
4785    llvm::BasicBlock *entry =
4786        llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4787    CGBuilderTy builder(CGM, entry);
4788
4789    // Pull the exception pointer out of the parameter list.
4790    llvm::Value *exn = &*fn->arg_begin();
4791
4792    // Call __cxa_begin_catch(exn).
4793    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4794    catchCall->setDoesNotThrow();
4795    catchCall->setCallingConv(CGM.getRuntimeCC());
4796
4797    // Call std::terminate().
4798    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4799    termCall->setDoesNotThrow();
4800    termCall->setDoesNotReturn();
4801    termCall->setCallingConv(CGM.getRuntimeCC());
4802
4803    // std::terminate cannot return.
4804    builder.CreateUnreachable();
4805  }
4806  return fnRef;
4807}
4808
/// Terminate the program when an exception violates an exception
/// specification or otherwise cannot be handled.
4809llvm::CallInst *
4810ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4811                                                   llvm::Value *Exn) {
4812  // In C++, we want to call __cxa_begin_catch() before terminating.
4813  if (Exn) {
4814    assert(CGF.CGM.getLangOpts().CPlusPlus);
4816  }
  // No exception object available: just call std::terminate directly.
4817  return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4818}
4819
4820std::pair<llvm::Value *, const CXXRecordDecl *>
4821ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4822 const CXXRecordDecl *RD) {
4823 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4824}
4825
4826void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4827 const CXXCatchStmt *C) {
4828 if (CGF.getTarget().hasFeature("exception-handling"))
4829 CGF.EHStack.pushCleanup<CatchRetScope>(
4830 NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4831 ItaniumCXXABI::emitBeginCatch(CGF, C);
4832}
4833
/// WebAssembly variant: terminate without first entering the catch.
4834llvm::CallInst *
4835WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4836                                                       llvm::Value *Exn) {
4837  // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
4838  // the violating exception to mark it handled, but it is currently hard to do
4839  // with wasm EH instruction structure with catch/catch_all, we just call
4840  // std::terminate and ignore the violating exception as in CGCXXABI.
4841  // TODO Consider code transformation that makes calling __clang_call_terminate
4842  // possible.
4844}
4845
4846/// Register a global destructor as best as we know how.
4847void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4848 llvm::FunctionCallee Dtor,
4849 llvm::Constant *Addr) {
4850 if (D.getTLSKind() != VarDecl::TLS_None) {
4851 llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
4852
4853 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4854 llvm::FunctionType *AtExitTy =
4855 llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, PtrTy}, true);
4856
4857 // Fetch the actual function.
4858 llvm::FunctionCallee AtExit =
4859 CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4860
4861 // Create __dtor function for the var decl.
4862 llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4863
4864 // Register above __dtor with atexit().
4865 // First param is flags and must be 0, second param is function ptr
4866 llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4867 CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4868
4869 // Cannot unregister TLS __dtor so done
4870 return;
4871 }
4872
4873 // Create __dtor function for the var decl.
4874 llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4875
4876 // Register above __dtor with atexit().
4877 CGF.registerGlobalDtorWithAtExit(DtorStub);
4878
4879 // Emit __finalize function to unregister __dtor and (as appropriate) call
4880 // __dtor.
4881 emitCXXStermFinalizer(D, DtorStub, Addr);
4882}
4883
/// Emit the "sterm finalizer" for variable D: a function that calls
/// unatexit() to unregister dtorStub and, if the cleanup was still pending
/// (unatexit returned 0), invokes dtorStub itself.
4884void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4885                                     llvm::Constant *addr) {
4886  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4887  SmallString<256> FnName;
4888  {
4889    llvm::raw_svector_ostream Out(FnName);
4890    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4891  }
4892
4893  // Create the finalization action associated with a variable.
4895  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4896      FTy, FnName.str(), FI, D.getLocation());
4897
4898  CodeGenFunction CGF(CGM);
4899
4900  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4902                    D.getInit()->getExprLoc());
4903
4904  // The unatexit subroutine unregisters __dtor functions that were previously
4905  // registered by the atexit subroutine. If the referenced function is found,
4906  // the unatexit returns a value of 0, meaning that the cleanup is still
4907  // pending (and we should call the __dtor function).
4908  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
4909
4910  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4911
4912  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4913  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4914
4915  // Check if unatexit returns a value of 0. If it does, jump to
4916  // DestructCallBlock, otherwise jump to EndBlock directly.
4917  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4918
4919  CGF.EmitBlock(DestructCallBlock);
4920
4921  // Emit the call to dtorStub.
4922  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4923
4924  // Make sure the call and the callee agree on calling convention.
4925  CI->setCallingConv(dtorStub->getCallingConv());
4926
4927  CGF.EmitBlock(EndBlock);
4928
4929  CGF.FinishFunction();
4930
  // Finally, decide how the finalizer itself gets scheduled for execution.
4931  if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
4932    CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
4933                                             IPA->getPriority());
4935             getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
4936    // According to C++ [basic.start.init]p2, class template static data
4937    // members (i.e., implicitly or explicitly instantiated specializations)
4938    // have unordered initialization. As a consequence, we can put them into
4939    // their own llvm.global_dtors entry.
4940    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
4941  } else {
4942    CGM.AddCXXStermFinalizerEntry(StermFinalizer);
4943  }
4944}
#define V(N, I)
Definition: ASTContext.h:3300
static StructorCodegen getCodegenToUse(CodeGenModule &CGM, const CXXMethodDecl *MD)
static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF)
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM)
Get or define the following function: void @__clang_call_terminate(i8* exn) nounwind noreturn This co...
static bool CXXRecordNonInlineHasAttr(const CXXRecordDecl *RD)
static llvm::Value * performTypeAdjustment(CodeGenFunction &CGF, Address InitialPtr, int64_t NonVirtualAdjustment, int64_t VirtualAdjustment, bool IsReturnAdjustment)
static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type)
Compute the flags for a __pbase_type_info, and remove the corresponding pieces from Type.
static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty)
ShouldUseExternalRTTIDescriptor - Returns whether the type information for the given type exists some...
static bool IsIncompleteClassType(const RecordType *RecordTy)
IsIncompleteClassType - Returns whether the given record type is incomplete.
static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base, SeenBases &Bases)
ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in abi::__vmi_class_type_info.
static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF)
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF, llvm::FunctionCallee dtor, llvm::Constant *addr, bool TLS)
Register a global destructor using __cxa_atexit.
static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF)
static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM)
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty)
Return the linkage that the type info and type info name constants should have for the given type.
static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static llvm::Function * createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM, StringRef FnName)
static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM)
static bool IsStandardLibraryRTTIDescriptor(QualType Ty)
IsStandardLibraryRTTIDescriptor - Returns whether the type information for the given type exists in t...
static llvm::Value * CallBeginCatch(CodeGenFunction &CGF, llvm::Value *Exn, bool EndMightThrow)
Emits a call to __cxa_begin_catch and enters a cleanup to call __cxa_end_catch.
static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static CharUnits computeOffsetHint(ASTContext &Context, const CXXRecordDecl *Src, const CXXRecordDecl *Dst)
Compute the src2dst_offset hint as described in the Itanium C++ ABI [2.9.7].
static bool isThreadWrapperReplaceable(const VarDecl *VD, CodeGen::CodeGenModule &CGM)
static void InitCatchParam(CodeGenFunction &CGF, const VarDecl &CatchParam, Address ParamAddr, SourceLocation Loc)
A "special initializer" callback for initializing a catch parameter during catch initialization.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty)
TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type info for that type is de...
static bool CanUseSingleInheritance(const CXXRecordDecl *RD)
static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM)
static llvm::GlobalValue::LinkageTypes getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM)
Get the appropriate linkage for the wrapper function.
static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM)
static void setVTableSelectiveDLLImportExport(CodeGenModule &CGM, llvm::GlobalVariable *VTable, const CXXRecordDecl *RD)
static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static bool ContainsIncompleteClassType(QualType Ty)
ContainsIncompleteClassType - Returns whether the given type contains an incomplete class type.
static void emitConstructorDestructorAlias(CodeGenModule &CGM, GlobalDecl AliasDecl, GlobalDecl TargetDecl)
static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM)
static void dtorTy(Block *, std::byte *Ptr, const Descriptor *)
Definition: Descriptor.cpp:29
int Priority
Definition: Format.cpp:2980
llvm::MachO::Record Record
Definition: MachO.h:31
static uint64_t getFieldOffset(const ASTContext &C, const FieldDecl *FD)
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
SourceLocation Loc
Definition: SemaObjC.cpp:758
static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D)
Determine what kind of template specialization the given declaration is.
static std::optional< QualType > getPointeeType(const MemRegion *R)
#define CXXABI(Name, Str)
Definition: TargetCXXABI.h:32
C Language Family Type Representation.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
const ValueDecl * getMemberPointerDecl() const
Definition: APValue.cpp:1057
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:185
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
CanQualType LongTy
Definition: ASTContext.h:1103
QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl, ObjCInterfaceDecl *PrevDecl=nullptr) const
getObjCInterfaceType - Return the unique reference to the type for the specified ObjC interface decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2593
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType VoidPtrTy
Definition: ASTContext.h:1121
IdentifierTable & Idents
Definition: ASTContext.h:647
const LangOptions & getLangOpts() const
Definition: ASTContext.h:778
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const
Get a function type and produce the equivalent function type with the specified exception specificati...
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType CharTy
Definition: ASTContext.h:1096
CanQualType IntTy
Definition: ASTContext.h:1103
CharUnits getExnObjectAlignment() const
Return the alignment (in bytes) of the thrown exception object.
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getPreferredTypeAlignInChars(QualType T) const
Return the PreferredAlignment of a (complete) type T, in characters.
Definition: ASTContext.h:2418
CanQualType VoidTy
Definition: ASTContext.h:1094
CanQualType UnsignedIntTy
Definition: ASTContext.h:1104
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:760
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
TargetCXXABI::Kind getCXXABIKind() const
Return the C++ ABI kind that should be used.
Definition: ASTContext.cpp:819
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
CanQualType LongLongTy
Definition: ASTContext.h:1103
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:249
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:259
This class is used for builtin types like 'int'.
Definition: Type.h:2997
Kind getKind() const
Definition: Type.h:3042
Implements C++ ABI-specific semantic analysis functions.
Definition: CXXABI.h:29
Represents a path from a specific derived class (which is not represented as part of the path) to a p...
BasePaths - Represents the set of paths from a derived class to one of its (direct or indirect) bases...
Represents a base class of a C++ class.
Definition: DeclCXX.h:146
QualType getType() const
Retrieves the type of the base class.
Definition: DeclCXX.h:249
CXXCatchStmt - This represents a C++ catch block.
Definition: StmtCXX.h:28
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2535
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition: ExprCXX.h:2497
FunctionDecl * getOperatorDelete() const
Definition: ExprCXX.h:2536
bool isGlobalDelete() const
Definition: ExprCXX.h:2522
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2799
Represents a call to a member function that may be written either with member call syntax (e....
Definition: ExprCXX.h:176
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2060
bool isVirtual() const
Definition: DeclCXX.h:2115
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition: DeclCXX.h:2186
bool isInstance() const
Definition: DeclCXX.h:2087
CXXMethodDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition: DeclCXX.h:2156
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition: ExprCXX.h:2240
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
base_class_range bases()
Definition: DeclCXX.h:619
unsigned getNumBases() const
Retrieves the number of base classes of this class.
Definition: DeclCXX.h:613
base_class_iterator bases_begin()
Definition: DeclCXX.h:626
base_class_range vbases()
Definition: DeclCXX.h:636
bool isAbstract() const
Determine whether this class has a pure virtual function.
Definition: DeclCXX.h:1222
bool isDynamicClass() const
Definition: DeclCXX.h:585
bool hasDefinition() const
Definition: DeclCXX.h:571
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
Definition: DeclCXX.h:634
bool isDerivedFrom(const CXXRecordDecl *Base) const
Determine whether this class is derived from the class Base.
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1206
const Expr * getSubExpr() const
Definition: ExprCXX.h:1226
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
Qualifiers getQualifiers() const
Retrieve all qualifiers.
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3483
CastKind getCastKind() const
Definition: Expr.h:3527
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
std::string SymbolPartition
The name of the partition that symbols are assigned to, specified with -fsymbol-partition (see https:...
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:111
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:220
CharUnits getAlignment() const
Definition: Address.h:166
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:184
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:241
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:190
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:176
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:592
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:870
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:135
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:304
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Definition: CGBuilder.h:354
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:291
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:107
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:127
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
Definition: CGBuilder.h:260
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:344
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
virtual bool shouldEmitExactDynamicCast(QualType DestRecordTy)=0
virtual void EmitCXXConstructors(const CXXConstructorDecl *D)=0
Emit constructor variants required by this ABI.
virtual llvm::Constant * getAddrOfRTTIDescriptor(QualType Ty)=0
virtual llvm::Value * performReturnAdjustment(CodeGenFunction &CGF, Address Ret, const ReturnAdjustment &RA)=0
virtual llvm::Value * getVTableAddressPointInStructor(CodeGenFunction &CGF, const CXXRecordDecl *RD, BaseSubobject Base, const CXXRecordDecl *NearestVBase)=0
Get the address point of the vtable for the given base subobject while building a constructor or a de...
virtual void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C)=0
virtual void emitRethrow(CodeGenFunction &CGF, bool isNoReturn)=0
virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, FunctionArgList &Args) const =0
virtual bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr)=0
Checks if ABI requires extra virtual offset for vtable field.
virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit)=0
Emits the guarded initializer and destructor setup for the given variable, given that it couldn't be ...
virtual void EmitCXXDestructors(const CXXDestructorDecl *D)=0
Emit destructor variants required by this ABI.
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF)=0
Emit the ABI-specific prolog for the function.
virtual bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, CXXDtorType DT) const =0
Returns true if the given destructor type should be emitted as a linkonce delegating thunk,...
virtual bool NeedsVTTParameter(GlobalDecl GD)
Return whether the given global decl needs a VTT parameter.
Definition: CGCXXABI.cpp:327
virtual llvm::CallInst * emitTerminateForUnexpectedException(CodeGenFunction &CGF, llvm::Value *Exn)
Definition: CGCXXABI.cpp:332
@ RAA_Default
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Definition: CGCXXABI.h:153
@ RAA_Indirect
Pass it as a pointer to temporary memory.
Definition: CGCXXABI.h:161
virtual bool shouldTypeidBeNullChecked(QualType SrcRecordTy)=0
virtual llvm::Type * ConvertMemberPointerType(const MemberPointerType *MPT)
Find the LLVM type used to represent the given member pointer type.
Definition: CGCXXABI.cpp:43
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition: CGCXXABI.cpp:105
virtual StringRef GetPureVirtualCallName()=0
Gets the pure virtual member call function.
virtual CharUnits getArrayCookieSizeImpl(QualType elementType)
Returns the extra size required in order to store the array cookie for the given type.
Definition: CGCXXABI.cpp:221
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const =0
Determine whether it's possible to emit a vtable for RD, even though we do not know that the vtable h...
virtual StringRef GetDeletedVirtualCallName()=0
Gets the deleted virtual member call name.
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:97
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
bool isEmittedWithConstantInitializer(const VarDecl *VD, bool InspectInitForWeakDef=false) const
Determine whether we will definitely emit this variable with a constant initializer,...
Definition: CGCXXABI.cpp:176
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition: CGCXXABI.cpp:87
virtual llvm::Constant * EmitMemberPointer(const APValue &MP, QualType MPT)
Create a member pointer for the given member pointer constant.
Definition: CGCXXABI.cpp:119
virtual llvm::Constant * getVTableAddressPoint(BaseSubobject Base, const CXXRecordDecl *VTableClass)=0
Get the address point of the vtable for the given base subobject.
virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params)=0
Insert any ABI-specific implicit parameters into the parameter list for a function.
virtual llvm::Value * readArrayCookieImpl(CodeGenFunction &IGF, Address ptr, CharUnits cookieSize)
Reads the array cookie for an allocation which is known to have one.
Definition: CGCXXABI.cpp:276
virtual llvm::Value * EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr, const MemberPointerType *MPT)
Calculate an l-value from an object and a data member pointer.
Definition: CGCXXABI.cpp:65
virtual llvm::Value * getCXXDestructorImplicitParam(CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating)=0
Get the implicit (second) parameter that comes after the "this" pointer, or nullptr if there isn't...
virtual std::pair< llvm::Value *, const CXXRecordDecl * > LoadVTablePtr(CodeGenFunction &CGF, Address This, const CXXRecordDecl *RD)=0
Load a vtable from This, an object of polymorphic type RD, or from one of its virtual bases if it doe...
virtual void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD, bool ReturnAdjustment)=0
virtual llvm::Value * EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType, Address This, DeleteOrMemberCallExpr E)=0
Emit the ABI-specific virtual destructor call.
bool mayNeedDestruction(const VarDecl *VD) const
Definition: CGCXXABI.cpp:163
virtual bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass)=0
Checks if ABI requires to initialize vptrs for given dynamic class.
virtual void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E)=0
virtual llvm::Value * GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This, const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl)=0
virtual bool isThisCompleteObject(GlobalDecl GD) const =0
Determine whether there's something special about the rules of the ABI that tell us that 'this' is a compl...
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual bool classifyReturnType(CGFunctionInfo &FI) const =0
If the C++ ABI requires the given type be returned in a particular way, this method sets RetAI and re...
virtual void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE, Address Ptr, QualType ElementType, const CXXDestructorDecl *Dtor)=0
virtual CatchTypeInfo getAddrOfCXXCatchHandlerType(QualType Ty, QualType C