clang 19.0.0git
ItaniumCXXABI.cpp
Go to the documentation of this file.
1//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides C++ code generation targeting the Itanium C++ ABI. The class
10// in this file generates structures that follow the Itanium C++ ABI, which is
11// documented at:
12// https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14//
15// It also supports the closely-related ARM ABI, documented at:
16// https://developer.arm.com/documentation/ihi0041/g/
17//
18//===----------------------------------------------------------------------===//
19
20#include "CGCXXABI.h"
21#include "CGCleanup.h"
22#include "CGRecordLayout.h"
23#include "CGVTables.h"
24#include "CodeGenFunction.h"
25#include "CodeGenModule.h"
26#include "TargetInfo.h"
27#include "clang/AST/Attr.h"
28#include "clang/AST/Mangle.h"
29#include "clang/AST/StmtCXX.h"
30#include "clang/AST/Type.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/GlobalValue.h"
34#include "llvm/IR/Instructions.h"
35#include "llvm/IR/Intrinsics.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/ScopedPrinter.h"
38
39#include <optional>
40
41using namespace clang;
42using namespace CodeGen;
43
44namespace {
45class ItaniumCXXABI : public CodeGen::CGCXXABI {
46 /// VTables - All the vtables which have been defined.
47 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
48
49 /// All the thread wrapper functions that have been used.
51 ThreadWrappers;
52
53protected:
54 bool UseARMMethodPtrABI;
55 bool UseARMGuardVarABI;
56 bool Use32BitVTableOffsetABI;
57
59 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
60 }
61
62public:
63 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
64 bool UseARMMethodPtrABI = false,
65 bool UseARMGuardVarABI = false) :
66 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
67 UseARMGuardVarABI(UseARMGuardVarABI),
68 Use32BitVTableOffsetABI(false) { }
69
70 bool classifyReturnType(CGFunctionInfo &FI) const override;
71
72 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
73 // If C++ prohibits us from making a copy, pass by address.
74 if (!RD->canPassInRegisters())
75 return RAA_Indirect;
76 return RAA_Default;
77 }
78
79 bool isThisCompleteObject(GlobalDecl GD) const override {
80 // The Itanium ABI has separate complete-object vs. base-object
81 // variants of both constructors and destructors.
82 if (isa<CXXDestructorDecl>(GD.getDecl())) {
83 switch (GD.getDtorType()) {
84 case Dtor_Complete:
85 case Dtor_Deleting:
86 return true;
87
88 case Dtor_Base:
89 return false;
90
91 case Dtor_Comdat:
92 llvm_unreachable("emitting dtor comdat as function?");
93 }
94 llvm_unreachable("bad dtor kind");
95 }
96 if (isa<CXXConstructorDecl>(GD.getDecl())) {
97 switch (GD.getCtorType()) {
98 case Ctor_Complete:
99 return true;
100
101 case Ctor_Base:
102 return false;
103
106 llvm_unreachable("closure ctors in Itanium ABI?");
107
108 case Ctor_Comdat:
109 llvm_unreachable("emitting ctor comdat as function?");
110 }
111 llvm_unreachable("bad dtor kind");
112 }
113
114 // No other kinds.
115 return false;
116 }
117
118 bool isZeroInitializable(const MemberPointerType *MPT) override;
119
120 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
121
124 const Expr *E,
125 Address This,
126 llvm::Value *&ThisPtrForCall,
127 llvm::Value *MemFnPtr,
128 const MemberPointerType *MPT) override;
129
130 llvm::Value *
133 llvm::Value *MemPtr,
134 const MemberPointerType *MPT) override;
135
137 const CastExpr *E,
138 llvm::Value *Src) override;
139 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
140 llvm::Constant *Src) override;
141
142 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
143
144 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
145 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
146 CharUnits offset) override;
147 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
148 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
150
152 llvm::Value *L, llvm::Value *R,
153 const MemberPointerType *MPT,
154 bool Inequality) override;
155
157 llvm::Value *Addr,
158 const MemberPointerType *MPT) override;
159
161 Address Ptr, QualType ElementType,
162 const CXXDestructorDecl *Dtor) override;
163
164 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
165 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
166
167 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
168
169 llvm::CallInst *
171 llvm::Value *Exn) override;
172
173 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
174 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
177 QualType CatchHandlerType) override {
179 }
180
181 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
182 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
183 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
184 Address ThisPtr,
185 llvm::Type *StdTypeInfoPtrTy) override;
186
187 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
188 QualType SrcRecordTy) override;
189
190 /// Determine whether we know that all instances of type RecordTy will have
191 /// the same vtable pointer values, that is distinct from all other vtable
192 /// pointers. While this is required by the Itanium ABI, it doesn't happen in
193 /// practice in some cases due to language extensions.
194 bool hasUniqueVTablePointer(QualType RecordTy) {
195 const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();
196
197 // Under -fapple-kext, multiple definitions of the same vtable may be
198 // emitted.
199 if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
200 getContext().getLangOpts().AppleKext)
201 return false;
202
203 // If the type_info* would be null, the vtable might be merged with that of
204 // another type.
205 if (!CGM.shouldEmitRTTI())
206 return false;
207
208 // If there's only one definition of the vtable in the program, it has a
209 // unique address.
210 if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD)))
211 return true;
212
213 // Even if there are multiple definitions of the vtable, they are required
214 // by the ABI to use the same symbol name, so should be merged at load
215 // time. However, if the class has hidden visibility, there can be
216 // different versions of the class in different modules, and the ABI
217 // library might treat them as being the same.
218 if (CGM.GetLLVMVisibility(RD->getVisibility()) !=
219 llvm::GlobalValue::DefaultVisibility)
220 return false;
221
222 return true;
223 }
224
225 bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
226 return hasUniqueVTablePointer(DestRecordTy);
227 }
228
230 QualType SrcRecordTy, QualType DestTy,
231 QualType DestRecordTy,
232 llvm::BasicBlock *CastEnd) override;
233
234 llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
235 QualType SrcRecordTy, QualType DestTy,
236 QualType DestRecordTy,
237 llvm::BasicBlock *CastSuccess,
238 llvm::BasicBlock *CastFail) override;
239
241 QualType SrcRecordTy) override;
242
243 bool EmitBadCastCall(CodeGenFunction &CGF) override;
244
245 llvm::Value *
247 const CXXRecordDecl *ClassDecl,
248 const CXXRecordDecl *BaseClassDecl) override;
249
250 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
251
252 AddedStructorArgCounts
254 SmallVectorImpl<CanQualType> &ArgTys) override;
255
257 CXXDtorType DT) const override {
258 // Itanium does not emit any destructor variant as an inline thunk.
259 // Delegating may occur as an optimization, but all variants are either
260 // emitted with external linkage or as linkonce if they are inline and used.
261 return false;
262 }
263
264 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
265
267 FunctionArgList &Params) override;
268
270
271 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
272 const CXXConstructorDecl *D,
274 bool ForVirtualBase,
275 bool Delegating) override;
276
278 const CXXDestructorDecl *DD,
280 bool ForVirtualBase,
281 bool Delegating) override;
282
284 CXXDtorType Type, bool ForVirtualBase,
285 bool Delegating, Address This,
286 QualType ThisTy) override;
287
289 const CXXRecordDecl *RD) override;
290
292 CodeGenFunction::VPtr Vptr) override;
293
294 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
295 return true;
296 }
297
298 llvm::Constant *
300 const CXXRecordDecl *VTableClass) override;
301
303 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
304 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
305
306 llvm::Value *getVTableAddressPointInStructorWithVTT(
307 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
308 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
309
310 llvm::Constant *
312 const CXXRecordDecl *VTableClass) override;
313
314 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
315 CharUnits VPtrOffset) override;
316
318 Address This, llvm::Type *Ty,
319 SourceLocation Loc) override;
320
322 const CXXDestructorDecl *Dtor,
323 CXXDtorType DtorType, Address This,
324 DeleteOrMemberCallExpr E) override;
325
326 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
327
328 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
329 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
330
331 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
332 bool ReturnAdjustment) override {
333 // Allow inlining of thunks by emitting them with available_externally
334 // linkage together with vtables when needed.
335 if (ForVTable && !Thunk->hasLocalLinkage())
336 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
337 CGM.setGVProperties(Thunk, GD);
338 }
339
340 bool exportThunk() override { return true; }
341
342 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
343 const ThisAdjustment &TA) override;
344
345 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
346 const ReturnAdjustment &RA) override;
347
349 FunctionArgList &Args) const override {
350 assert(!Args.empty() && "expected the arglist to not be empty!");
351 return Args.size() - 1;
352 }
353
354 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
355 StringRef GetDeletedVirtualCallName() override
356 { return "__cxa_deleted_virtual"; }
357
358 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
360 Address NewPtr,
361 llvm::Value *NumElements,
362 const CXXNewExpr *expr,
363 QualType ElementType) override;
364 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
365 Address allocPtr,
366 CharUnits cookieSize) override;
367
368 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
369 llvm::GlobalVariable *DeclPtr,
370 bool PerformInit) override;
371 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
372 llvm::FunctionCallee dtor,
373 llvm::Constant *addr) override;
374
375 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
376 llvm::Value *Val);
378 CodeGenModule &CGM,
379 ArrayRef<const VarDecl *> CXXThreadLocals,
380 ArrayRef<llvm::Function *> CXXThreadLocalInits,
381 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
382
383 bool usesThreadWrapperFunction(const VarDecl *VD) const override {
386 }
388 QualType LValType) override;
389
390 bool NeedsVTTParameter(GlobalDecl GD) override;
391
392 /**************************** RTTI Uniqueness ******************************/
393
394protected:
395 /// Returns true if the ABI requires RTTI type_info objects to be unique
396 /// across a program.
397 virtual bool shouldRTTIBeUnique() const { return true; }
398
399public:
400 /// What sort of unique-RTTI behavior should we use?
401 enum RTTIUniquenessKind {
402 /// We are guaranteeing, or need to guarantee, that the RTTI string
403 /// is unique.
404 RUK_Unique,
405
406 /// We are not guaranteeing uniqueness for the RTTI string, so we
407 /// can demote to hidden visibility but must use string comparisons.
408 RUK_NonUniqueHidden,
409
410 /// We are not guaranteeing uniqueness for the RTTI string, so we
411 /// have to use string comparisons, but we also have to emit it with
412 /// non-hidden visibility.
413 RUK_NonUniqueVisible
414 };
415
416 /// Return the required visibility status for the given type and linkage in
417 /// the current ABI.
418 RTTIUniquenessKind
419 classifyRTTIUniqueness(QualType CanTy,
420 llvm::GlobalValue::LinkageTypes Linkage) const;
421 friend class ItaniumRTTIBuilder;
422
423 void emitCXXStructor(GlobalDecl GD) override;
424
425 std::pair<llvm::Value *, const CXXRecordDecl *>
427 const CXXRecordDecl *RD) override;
428
429 private:
430 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
431 const auto &VtableLayout =
432 CGM.getItaniumVTableContext().getVTableLayout(RD);
433
434 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
435 // Skip empty slot.
436 if (!VtableComponent.isUsedFunctionPointerKind())
437 continue;
438
439 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
440 if (!Method->getCanonicalDecl()->isInlined())
441 continue;
442
443 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
444 auto *Entry = CGM.GetGlobalValue(Name);
445 // This checks if virtual inline function has already been emitted.
446 // Note that it is possible that this inline function would be emitted
447 // after trying to emit vtable speculatively. Because of this we do
448 // an extra pass after emitting all deferred vtables to find and emit
449 // these vtables opportunistically.
450 if (!Entry || Entry->isDeclaration())
451 return true;
452 }
453 return false;
454 }
455
456 bool isVTableHidden(const CXXRecordDecl *RD) const {
457 const auto &VtableLayout =
458 CGM.getItaniumVTableContext().getVTableLayout(RD);
459
460 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
461 if (VtableComponent.isRTTIKind()) {
462 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
463 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
464 return true;
465 } else if (VtableComponent.isUsedFunctionPointerKind()) {
466 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
467 if (Method->getVisibility() == Visibility::HiddenVisibility &&
468 !Method->isDefined())
469 return true;
470 }
471 }
472 return false;
473 }
474};
475
476class ARMCXXABI : public ItaniumCXXABI {
477public:
478 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
479 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
480 /*UseARMGuardVarABI=*/true) {}
481
482 bool constructorsAndDestructorsReturnThis() const override { return true; }
483
484 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
485 QualType ResTy) override;
486
487 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
488 Address InitializeArrayCookie(CodeGenFunction &CGF,
489 Address NewPtr,
490 llvm::Value *NumElements,
491 const CXXNewExpr *expr,
492 QualType ElementType) override;
493 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
494 CharUnits cookieSize) override;
495};
496
497class AppleARM64CXXABI : public ARMCXXABI {
498public:
499 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
500 Use32BitVTableOffsetABI = true;
501 }
502
503 // ARM64 libraries are prepared for non-unique RTTI.
504 bool shouldRTTIBeUnique() const override { return false; }
505};
506
507class FuchsiaCXXABI final : public ItaniumCXXABI {
508public:
509 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
510 : ItaniumCXXABI(CGM) {}
511
512private:
513 bool constructorsAndDestructorsReturnThis() const override { return true; }
514};
515
516class WebAssemblyCXXABI final : public ItaniumCXXABI {
517public:
518 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
519 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
520 /*UseARMGuardVarABI=*/true) {}
521 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
522 llvm::CallInst *
523 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
524 llvm::Value *Exn) override;
525
526private:
527 bool constructorsAndDestructorsReturnThis() const override { return true; }
528 bool canCallMismatchedFunctionType() const override { return false; }
529};
530
531class XLCXXABI final : public ItaniumCXXABI {
532public:
533 explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
534 : ItaniumCXXABI(CGM) {}
535
536 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
537 llvm::FunctionCallee dtor,
538 llvm::Constant *addr) override;
539
540 bool useSinitAndSterm() const override { return true; }
541
542private:
543 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
544 llvm::Constant *addr);
545};
546}
547
549 switch (CGM.getContext().getCXXABIKind()) {
550 // For IR-generation purposes, there's no significant difference
551 // between the ARM and iOS ABIs.
552 case TargetCXXABI::GenericARM:
553 case TargetCXXABI::iOS:
554 case TargetCXXABI::WatchOS:
555 return new ARMCXXABI(CGM);
556
557 case TargetCXXABI::AppleARM64:
558 return new AppleARM64CXXABI(CGM);
559
560 case TargetCXXABI::Fuchsia:
561 return new FuchsiaCXXABI(CGM);
562
563 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
564 // include the other 32-bit ARM oddities: constructor/destructor return values
565 // and array cookies.
566 case TargetCXXABI::GenericAArch64:
567 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
568 /*UseARMGuardVarABI=*/true);
569
570 case TargetCXXABI::GenericMIPS:
571 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
572
573 case TargetCXXABI::WebAssembly:
574 return new WebAssemblyCXXABI(CGM);
575
576 case TargetCXXABI::XL:
577 return new XLCXXABI(CGM);
578
579 case TargetCXXABI::GenericItanium:
580 if (CGM.getContext().getTargetInfo().getTriple().getArch()
581 == llvm::Triple::le32) {
582 // For PNaCl, use ARM-style method pointers so that PNaCl code
583 // does not assume anything about the alignment of function
584 // pointers.
585 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
586 }
587 return new ItaniumCXXABI(CGM);
588
589 case TargetCXXABI::Microsoft:
590 llvm_unreachable("Microsoft ABI is not Itanium-based");
591 }
592 llvm_unreachable("bad ABI kind");
593}
594
595llvm::Type *
596ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
597 if (MPT->isMemberDataPointer())
598 return CGM.PtrDiffTy;
599 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
600}
601
602/// In the Itanium and ARM ABIs, method pointers have the form:
603/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
604///
605/// In the Itanium ABI:
606/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
607/// - the this-adjustment is (memptr.adj)
608/// - the virtual offset is (memptr.ptr - 1)
609///
610/// In the ARM ABI:
611/// - method pointers are virtual if (memptr.adj & 1) is nonzero
612/// - the this-adjustment is (memptr.adj >> 1)
613/// - the virtual offset is (memptr.ptr)
614/// ARM uses 'adj' for the virtual flag because Thumb functions
615/// may be only single-byte aligned.
616///
617/// If the member is virtual, the adjusted 'this' pointer points
618/// to a vtable pointer from which the virtual offset is applied.
619///
620/// If the member is non-virtual, memptr.ptr is the address of
621/// the function to call.
622CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
623 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
624 llvm::Value *&ThisPtrForCall,
625 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
626 CGBuilderTy &Builder = CGF.Builder;
627
628 const FunctionProtoType *FPT =
630 auto *RD =
631 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
632
633 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
634
635 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
636 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
637 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
638
639 // Extract memptr.adj, which is in the second field.
640 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
641
642 // Compute the true adjustment.
643 llvm::Value *Adj = RawAdj;
644 if (UseARMMethodPtrABI)
645 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
646
647 // Apply the adjustment and cast back to the original struct type
648 // for consistency.
649 llvm::Value *This = ThisAddr.getPointer();
650 This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj);
651 ThisPtrForCall = This;
652
653 // Load the function pointer.
654 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
655
656 // If the LSB in the function pointer is 1, the function pointer points to
657 // a virtual function.
658 llvm::Value *IsVirtual;
659 if (UseARMMethodPtrABI)
660 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
661 else
662 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
663 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
664 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
665
666 // In the virtual path, the adjustment left 'This' pointing to the
667 // vtable of the correct base subobject. The "function pointer" is an
668 // offset within the vtable (+1 for the virtual flag on non-ARM).
669 CGF.EmitBlock(FnVirtual);
670
671 // Cast the adjusted this to a pointer to vtable pointer and load.
672 llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
673 CharUnits VTablePtrAlign =
674 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
675 CGF.getPointerAlign());
676 llvm::Value *VTable = CGF.GetVTablePtr(
677 Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
678
679 // Apply the offset.
680 // On ARM64, to reserve extra space in virtual member function pointers,
681 // we only pay attention to the low 32 bits of the offset.
682 llvm::Value *VTableOffset = FnAsInt;
683 if (!UseARMMethodPtrABI)
684 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
685 if (Use32BitVTableOffsetABI) {
686 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
687 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
688 }
689
690 // Check the address of the function pointer if CFI on member function
691 // pointers is enabled.
692 llvm::Constant *CheckSourceLocation;
693 llvm::Constant *CheckTypeDesc;
694 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
695 CGM.HasHiddenLTOVisibility(RD);
696 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
697 CGM.HasHiddenLTOVisibility(RD);
698 bool ShouldEmitWPDInfo =
699 CGM.getCodeGenOpts().WholeProgramVTables &&
700 // Don't insert type tests if we are forcing public visibility.
701 !CGM.AlwaysHasLTOVisibilityPublic(RD);
702 llvm::Value *VirtualFn = nullptr;
703
704 {
705 CodeGenFunction::SanitizerScope SanScope(&CGF);
706 llvm::Value *TypeId = nullptr;
707 llvm::Value *CheckResult = nullptr;
708
709 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
710 // If doing CFI, VFE or WPD, we will need the metadata node to check
711 // against.
712 llvm::Metadata *MD =
713 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
714 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
715 }
716
717 if (ShouldEmitVFEInfo) {
718 llvm::Value *VFPAddr =
719 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
720
721 // If doing VFE, load from the vtable with a type.checked.load intrinsic
722 // call. Note that we use the GEP to calculate the address to load from
723 // and pass 0 as the offset to the intrinsic. This is because every
724 // vtable slot of the correct type is marked with matching metadata, and
725 // we know that the load must be from one of these slots.
726 llvm::Value *CheckedLoad = Builder.CreateCall(
727 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
728 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
729 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
730 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
731 } else {
732 // When not doing VFE, emit a normal load, as it allows more
733 // optimisations than type.checked.load.
734 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
735 llvm::Value *VFPAddr =
736 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
737 llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
738 ? llvm::Intrinsic::type_test
739 : llvm::Intrinsic::public_type_test;
740
741 CheckResult =
742 Builder.CreateCall(CGM.getIntrinsic(IID), {VFPAddr, TypeId});
743 }
744
745 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
746 VirtualFn = CGF.Builder.CreateCall(
747 CGM.getIntrinsic(llvm::Intrinsic::load_relative,
748 {VTableOffset->getType()}),
749 {VTable, VTableOffset});
750 } else {
751 llvm::Value *VFPAddr =
752 CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
753 VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
754 CGF.getPointerAlign(),
755 "memptr.virtualfn");
756 }
757 }
758 assert(VirtualFn && "Virtual fuction pointer not created!");
759 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
760 CheckResult) &&
761 "Check result required but not created!");
762
763 if (ShouldEmitCFICheck) {
764 // If doing CFI, emit the check.
765 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
766 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
767 llvm::Constant *StaticData[] = {
768 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
769 CheckSourceLocation,
770 CheckTypeDesc,
771 };
772
773 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
774 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
775 } else {
776 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
777 CGM.getLLVMContext(),
778 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
779 llvm::Value *ValidVtable = Builder.CreateCall(
780 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
781 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
782 SanitizerHandler::CFICheckFail, StaticData,
783 {VTable, ValidVtable});
784 }
785
786 FnVirtual = Builder.GetInsertBlock();
787 }
788 } // End of sanitizer scope
789
790 CGF.EmitBranch(FnEnd);
791
792 // In the non-virtual path, the function pointer is actually a
793 // function pointer.
794 CGF.EmitBlock(FnNonVirtual);
795 llvm::Value *NonVirtualFn =
796 Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
797
798 // Check the function pointer if CFI on member function pointers is enabled.
799 if (ShouldEmitCFICheck) {
801 if (RD->hasDefinition()) {
802 CodeGenFunction::SanitizerScope SanScope(&CGF);
803
804 llvm::Constant *StaticData[] = {
805 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
806 CheckSourceLocation,
807 CheckTypeDesc,
808 };
809
810 llvm::Value *Bit = Builder.getFalse();
811 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
812 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
813 getContext().getMemberPointerType(
814 MPT->getPointeeType(),
815 getContext().getRecordType(Base).getTypePtr()));
816 llvm::Value *TypeId =
817 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
818
819 llvm::Value *TypeTest =
820 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
821 {NonVirtualFn, TypeId});
822 Bit = Builder.CreateOr(Bit, TypeTest);
823 }
824
825 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
826 SanitizerHandler::CFICheckFail, StaticData,
827 {NonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
828
829 FnNonVirtual = Builder.GetInsertBlock();
830 }
831 }
832
833 // We're done.
834 CGF.EmitBlock(FnEnd);
835 llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
836 CalleePtr->addIncoming(VirtualFn, FnVirtual);
837 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
838
839 CGCallee Callee(FPT, CalleePtr);
840 return Callee;
841}
842
843/// Compute an l-value by applying the given pointer-to-member to a
844/// base object.
845llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
846 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
847 const MemberPointerType *MPT) {
848 assert(MemPtr->getType() == CGM.PtrDiffTy);
849
850 CGBuilderTy &Builder = CGF.Builder;
851
852 // Apply the offset, which we assume is non-null.
853 return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.getPointer(), MemPtr,
854 "memptr.offset");
855}
856
857/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
858/// conversion.
859///
860/// Bitcast conversions are always a no-op under Itanium.
861///
862/// Obligatory offset/adjustment diagram:
863/// <-- offset --> <-- adjustment -->
864/// |--------------------------|----------------------|--------------------|
865/// ^Derived address point ^Base address point ^Member address point
866///
867/// So when converting a base member pointer to a derived member pointer,
868/// we add the offset to the adjustment because the address point has
869/// decreased; and conversely, when converting a derived MP to a base MP
870/// we subtract the offset from the adjustment because the address point
871/// has increased.
872///
873/// The standard forbids (at compile time) conversion to and from
874/// virtual bases, which is why we don't have to consider them here.
875///
876/// The standard forbids (at run time) casting a derived MP to a base
877/// MP when the derived MP does not point to a member of the base.
878/// This is why -1 is a reasonable choice for null data member
879/// pointers.
/// Apply a member-pointer cast (derived-to-base, base-to-derived, or
/// reinterpret) to a runtime member pointer value. See the
/// offset/adjustment diagram in the block comment above for why
/// derived-to-base subtracts and base-to-derived adds.
880llvm::Value *
881ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
882 const CastExpr *E,
883 llvm::Value *src) {
884 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
885 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
886 E->getCastKind() == CK_ReinterpretMemberPointer);
887
888 // Under Itanium, reinterprets don't require any additional processing.
889 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
890
891 // Use constant emission if we can.
// Delegates to the llvm::Constant overload below so constant member
// pointers fold at compile time instead of emitting IR.
892 if (isa<llvm::Constant>(src))
893 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
894
// A null adjustment means the cast is a no-op on the representation.
895 llvm::Constant *adj = getMemberPointerAdjustment(E);
896 if (!adj) return src;
897
898 CGBuilderTy &Builder = CGF.Builder;
899 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
900
901 const MemberPointerType *destTy =
// NOTE(review): destTy's initializer was lost in extraction; presumably
// E->getType()->castAs<MemberPointerType>() — confirm against upstream.
903
904 // For member data pointers, this is just a matter of adding the
905 // offset if the source is non-null.
906 if (destTy->isMemberDataPointer()) {
907 llvm::Value *dst;
908 if (isDerivedToBase)
909 dst = Builder.CreateNSWSub(src, adj, "adj");
910 else
911 dst = Builder.CreateNSWAdd(src, adj, "adj");
912
913 // Null check.
// Null data member pointers are encoded as all-ones (-1), so preserve
// null rather than offsetting it.
914 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
915 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
916 return Builder.CreateSelect(isNull, src, dst);
917 }
918
919 // The this-adjustment is left-shifted by 1 on ARM.
// On ARM the stored adjustment is (this-adjustment << 1) because the low
// bit of memptr.adj carries the virtual flag (see block comment above).
920 if (UseARMMethodPtrABI) {
921 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
922 offset <<= 1;
923 adj = llvm::ConstantInt::get(adj->getType(), offset);
924 }
925
// Member function pointers: rewrite only the 'adj' field (index 1) of
// the { ptr, adj } pair; 'ptr' is unchanged by hierarchy casts.
926 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
927 llvm::Value *dstAdj;
928 if (isDerivedToBase)
929 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
930 else
931 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
932
933 return Builder.CreateInsertValue(src, dstAdj, 1);
934}
935
936llvm::Constant *
937ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
938 llvm::Constant *src) {
939 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
940 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
941 E->getCastKind() == CK_ReinterpretMemberPointer);
942
943 // Under Itanium, reinterprets don't require any additional processing.
944 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
945
946 // If the adjustment is trivial, we don't need to do anything.
947 llvm::Constant *adj = getMemberPointerAdjustment(E);
948 if (!adj) return src;
949
950 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
951
952 const MemberPointerType *destTy =
954
955 // For member data pointers, this is just a matter of adding the
956 // offset if the source is non-null.
957 if (destTy->isMemberDataPointer()) {
958 // null maps to null.
959 if (src->isAllOnesValue()) return src;
960
961 if (isDerivedToBase)
962 return llvm::ConstantExpr::getNSWSub(src, adj);
963 else
964 return llvm::ConstantExpr::getNSWAdd(src, adj);
965 }
966
967 // The this-adjustment is left-shifted by 1 on ARM.
968 if (UseARMMethodPtrABI) {
969 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
970 offset <<= 1;
971 adj = llvm::ConstantInt::get(adj->getType(), offset);
972 }
973
974 llvm::Constant *srcAdj = src->getAggregateElement(1);
975 llvm::Constant *dstAdj;
976 if (isDerivedToBase)
977 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
978 else
979 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
980
981 llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
982 assert(res != nullptr && "Folding must succeed");
983 return res;
984}
985
986llvm::Constant *
987ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
988 // Itanium C++ ABI 2.3:
989 // A NULL pointer is represented as -1.
990 if (MPT->isMemberDataPointer())
991 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
992
993 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
994 llvm::Constant *Values[2] = { Zero, Zero };
995 return llvm::ConstantStruct::getAnon(Values);
996}
997
/// Emit a constant data member pointer for a member at the given byte
/// offset within its containing class.
llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}
1006
/// Emit a constant member function pointer for MD with no additional
/// this-adjustment; see BuildMemberPointer for the encoding.
llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}
1011
1012llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
1014 assert(MD->isInstance() && "Member function must not be static!");
1015
1016 CodeGenTypes &Types = CGM.getTypes();
1017
1018 // Get the function pointer (or index if this is a virtual function).
1019 llvm::Constant *MemPtr[2];
1020 if (MD->isVirtual()) {
1021 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1022 uint64_t VTableOffset;
1023 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1024 // Multiply by 4-byte relative offsets.
1025 VTableOffset = Index * 4;
1026 } else {
1027 const ASTContext &Context = getContext();
1028 CharUnits PointerWidth = Context.toCharUnitsFromBits(
1029 Context.getTargetInfo().getPointerWidth(LangAS::Default));
1030 VTableOffset = Index * PointerWidth.getQuantity();
1031 }
1032
1033 if (UseARMMethodPtrABI) {
1034 // ARM C++ ABI 3.2.1:
1035 // This ABI specifies that adj contains twice the this
1036 // adjustment, plus 1 if the member function is virtual. The
1037 // least significant bit of adj then makes exactly the same
1038 // discrimination as the least significant bit of ptr does for
1039 // Itanium.
1040 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1041 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1042 2 * ThisAdjustment.getQuantity() + 1);
1043 } else {
1044 // Itanium C++ ABI 2.3:
1045 // For a virtual function, [the pointer field] is 1 plus the
1046 // virtual table offset (in bytes) of the function,
1047 // represented as a ptrdiff_t.
1048 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1049 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1050 ThisAdjustment.getQuantity());
1051 }
1052 } else {
1053 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1054 llvm::Type *Ty;
1055 // Check whether the function has a computable LLVM signature.
1056 if (Types.isFuncTypeConvertible(FPT)) {
1057 // The function has a computable LLVM signature; use the correct type.
1058 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1059 } else {
1060 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1061 // function type is incomplete.
1062 Ty = CGM.PtrDiffTy;
1063 }
1064 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
1065
1066 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1067 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1068 (UseARMMethodPtrABI ? 2 : 1) *
1069 ThisAdjustment.getQuantity());
1070 }
1071
1072 return llvm::ConstantStruct::getAnon(MemPtr);
1073}
1074
/// Lower a constant-evaluated member pointer (an APValue) to its Itanium
/// IR representation.
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  // No declaration means the null member pointer.
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  // This-adjustment accumulated along the derived-to-base path recorded
  // in the APValue.
  CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);

  // Member function: delegate to the {ptr, adj} pair builder.
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  // Data member: the representation is the field offset plus the path
  // adjustment, as a ptrdiff_t.
  CharUnits FieldOffset =
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}
1091
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // For inequality we emit the negated predicate and, per De Morgan,
  // swap the 'and'/'or' operators used to combine the sub-conditions.
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr. This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null. ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj. If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}
1169
1170llvm::Value *
1171ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1172 llvm::Value *MemPtr,
1173 const MemberPointerType *MPT) {
1174 CGBuilderTy &Builder = CGF.Builder;
1175
1176 /// For member data pointers, this is just a check against -1.
1177 if (MPT->isMemberDataPointer()) {
1178 assert(MemPtr->getType() == CGM.PtrDiffTy);
1179 llvm::Value *NegativeOne =
1180 llvm::Constant::getAllOnesValue(MemPtr->getType());
1181 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1182 }
1183
1184 // In Itanium, a member function pointer is not null if 'ptr' is not null.
1185 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1186
1187 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1188 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1189
1190 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1191 // (the virtual bit) is set.
1192 if (UseARMMethodPtrABI) {
1193 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1194 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1195 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1196 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1197 "memptr.isvirtual");
1198 Result = Builder.CreateOr(Result, IsVirtual);
1199 }
1200
1201 return Result;
1202}
1203
1204bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1205 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1206 if (!RD)
1207 return false;
1208
1209 // If C++ prohibits us from making a copy, return by address.
1210 if (!RD->canPassInRegisters()) {
1211 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1212 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1213 return true;
1214 }
1215 return false;
1216}
1217
/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset (their null value
/// is -1); member function pointers use {0, 0} as null, so they are
/// zero-initializable.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}
1223
/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,
                                                        CGF.getPointerAlign());

    // Apply the offset to get the most-derived object's address.
    llvm::Value *CompletePtr = Ptr.getPointer();
    CompletePtr =
        CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws. The cleanup is pushed before the
    // destructor call and popped after it below.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // The deleting destructor frees the object itself; when deleting
  // globally, only the complete destructor runs here.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}
1266
1267void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1268 // void __cxa_rethrow();
1269
1270 llvm::FunctionType *FTy =
1271 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1272
1273 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1274
1275 if (isNoReturn)
1276 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, std::nullopt);
1277 else
1279}
1280
1281static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1282 // void *__cxa_allocate_exception(size_t thrown_size);
1283
1284 llvm::FunctionType *FTy =
1285 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1286
1287 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1288}
1289
1290static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1291 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1292 // void (*dest) (void *));
1293
1294 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
1295 llvm::FunctionType *FTy =
1296 llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1297
1298 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1299}
1300
1301void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1302 QualType ThrowType = E->getSubExpr()->getType();
1303 // Now allocate the exception object.
1304 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1305 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1306
1307 llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1308 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1309 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1310
1311 CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1312 CGF.EmitAnyExprToExn(
1313 E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
1314
1315 // Now throw the exception.
1316 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1317 /*ForEH=*/true);
1318
1319 // The address of the destructor. If the exception type has a
1320 // trivial destructor (or isn't a record), we just pass null.
1321 llvm::Constant *Dtor = nullptr;
1322 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1323 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1324 if (!Record->hasTrivialDestructor()) {
1325 CXXDestructorDecl *DtorD = Record->getDestructor();
1326 Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1327 }
1328 }
1329 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1330
1331 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1333}
1334
1335static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1336 // void *__dynamic_cast(const void *sub,
1337 // global_as const abi::__class_type_info *src,
1338 // global_as const abi::__class_type_info *dst,
1339 // std::ptrdiff_t src2dst_offset);
1340
1341 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1342 llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy;
1343 llvm::Type *PtrDiffTy =
1345
1346 llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy };
1347
1348 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1349
1350 // Mark the function as nounwind willreturn readonly.
1351 llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
1352 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1353 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
1354 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
1355 llvm::AttributeList Attrs = llvm::AttributeList::get(
1356 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1357
1358 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1359}
1360
1361static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1362 // void __cxa_bad_cast();
1363 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1364 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1365}
1366
1367/// Compute the src2dst_offset hint as described in the
1368/// Itanium C++ ABI [2.9.7]
1370 const CXXRecordDecl *Src,
1371 const CXXRecordDecl *Dst) {
1372 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1373 /*DetectVirtual=*/false);
1374
1375 // If Dst is not derived from Src we can skip the whole computation below and
1376 // return that Src is not a public base of Dst. Record all inheritance paths.
1377 if (!Dst->isDerivedFrom(Src, Paths))
1378 return CharUnits::fromQuantity(-2ULL);
1379
1380 unsigned NumPublicPaths = 0;
1381 CharUnits Offset;
1382
1383 // Now walk all possible inheritance paths.
1384 for (const CXXBasePath &Path : Paths) {
1385 if (Path.Access != AS_public) // Ignore non-public inheritance.
1386 continue;
1387
1388 ++NumPublicPaths;
1389
1390 for (const CXXBasePathElement &PathElement : Path) {
1391 // If the path contains a virtual base class we can't give any hint.
1392 // -1: no hint.
1393 if (PathElement.Base->isVirtual())
1394 return CharUnits::fromQuantity(-1ULL);
1395
1396 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1397 continue;
1398
1399 // Accumulate the base class offsets.
1400 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1401 Offset += L.getBaseClassOffset(
1402 PathElement.Base->getType()->getAsCXXRecordDecl());
1403 }
1404 }
1405
1406 // -2: Src is not a public base of Dst.
1407 if (NumPublicPaths == 0)
1408 return CharUnits::fromQuantity(-2ULL);
1409
1410 // -3: Src is a multiple public base type but never a virtual base type.
1411 if (NumPublicPaths > 1)
1412 return CharUnits::fromQuantity(-3ULL);
1413
1414 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1415 // Return the offset of Src from the origin of Dst.
1416 return Offset;
1417}
1418
1419static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1420 // void __cxa_bad_typeid();
1421 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1422
1423 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1424}
1425
/// Only a typeid applied to a dereferenced pointer needs a null check
/// on its operand; other operands are used as-is.
bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
                                              QualType SrcRecordTy) {
  return IsDeref;
}
1430
1431void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1432 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1433 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1434 Call->setDoesNotReturn();
1435 CGF.Builder.CreateUnreachable();
1436}
1437
/// Emit the polymorphic typeid lookup: load the object's vptr and read
/// the type_info pointer stored immediately before the vtable's address
/// point.
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy,
                                        ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info: in the relative layout it is a 32-bit relative
    // reference 4 bytes before the address point.
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
  } else {
    // Load the type info: in the classic layout it is the pointer-sized
    // slot at index -1 from the address point.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
                                       CGF.getPointerAlign());
}
1460
/// A dynamic_cast of a pointer needs a null check on the source; a
/// reference operand cannot be null, so no check is required.
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}
1465
1466llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
1467 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1468 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1469 llvm::Type *PtrDiffLTy =
1471
1472 llvm::Value *SrcRTTI =
1474 llvm::Value *DestRTTI =
1476
1477 // Compute the offset hint.
1478 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1479 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1480 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1481 PtrDiffLTy,
1482 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1483
1484 // Emit the call to __dynamic_cast.
1485 llvm::Value *Args[] = {ThisAddr.getPointer(), SrcRTTI, DestRTTI, OffsetHint};
1486 llvm::Value *Value =
1488
1489 /// C++ [expr.dynamic.cast]p9:
1490 /// A failed cast to reference type throws std::bad_cast
1491 if (DestTy->isReferenceType()) {
1492 llvm::BasicBlock *BadCastBlock =
1493 CGF.createBasicBlock("dynamic_cast.bad_cast");
1494
1495 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1496 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1497
1498 CGF.EmitBlock(BadCastBlock);
1499 EmitBadCastCall(CGF);
1500 }
1501
1502 return Value;
1503}
1504
/// Emit a dynamic_cast that can be resolved without calling the runtime:
/// the cast succeeds iff the object's vptr equals the address point that
/// DestDecl's vtable assigns to the SrcDecl subobject at the unique
/// public inheritance offset.
llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
    llvm::BasicBlock *CastFail) {
  ASTContext &Context = getContext();

  // Find all the inheritance paths.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);
  (void)DestDecl->isDerivedFrom(SrcDecl, Paths);

  // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
  // might appear.
  std::optional<CharUnits> Offset;
  for (const CXXBasePath &Path : Paths) {
    // dynamic_cast only finds public inheritance paths.
    if (Path.Access != AS_public)
      continue;

    // Accumulate the byte offset contributed by each step of this path.
    CharUnits PathOffset;
    for (const CXXBasePathElement &PathElement : Path) {
      // Find the offset along this inheritance step.
      const CXXRecordDecl *Base =
          PathElement.Base->getType()->getAsCXXRecordDecl();
      if (PathElement.Base->isVirtual()) {
        // For a virtual base class, we know that the derived class is exactly
        // DestDecl, so we can use the vbase offset from its layout.
        const ASTRecordLayout &L = Context.getASTRecordLayout(DestDecl);
        PathOffset = L.getVBaseClassOffset(Base);
      } else {
        const ASTRecordLayout &L =
            Context.getASTRecordLayout(PathElement.Class);
        PathOffset += L.getBaseClassOffset(Base);
      }
    }

    if (!Offset)
      Offset = PathOffset;
    else if (Offset != PathOffset) {
      // Base appears in at least two different places. Find the most-derived
      // object and see if it's a DestDecl. Note that the most-derived object
      // must be at least as aligned as this base class subobject, and must
      // have a vptr at offset 0.
      ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy),
                         CGF.VoidPtrTy, ThisAddr.getAlignment());
      SrcDecl = DestDecl;
      Offset = CharUnits::Zero();
      break;
    }
  }

  if (!Offset) {
    // If there are no public inheritance paths, the cast always fails.
    CGF.EmitBranch(CastFail);
    return llvm::PoisonValue::get(CGF.VoidPtrTy);
  }

  // Compare the vptr against the expected vptr for the destination type at
  // this offset. Note that we do not know what type ThisAddr points to in
  // the case where the derived class multiply inherits from the base class
  // so we can't use GetVTablePtr, so we load the vptr directly instead.
  llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
      ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable");
  CGM.DecorateInstructionWithTBAA(
      VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
  llvm::Value *Success = CGF.Builder.CreateICmpEQ(
      VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
  // On success the result pointer is the subobject address adjusted back
  // down to the most-derived (DestDecl) object.
  llvm::Value *Result = ThisAddr.getPointer();
  if (!Offset->isZero())
    Result = CGF.Builder.CreateInBoundsGEP(
        CGF.CharTy, Result,
        {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())});
  CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail);
  return Result;
}
1582
1583llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
1584 Address ThisAddr,
1585 QualType SrcRecordTy) {
1586 auto *ClassDecl =
1587 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1588 llvm::Value *OffsetToTop;
1589 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1590 // Get the vtable pointer.
1591 llvm::Value *VTable =
1592 CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
1593
1594 // Get the offset-to-top from the vtable.
1595 OffsetToTop =
1596 CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
1597 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1598 CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1599 } else {
1600 llvm::Type *PtrDiffLTy =
1602
1603 // Get the vtable pointer.
1604 llvm::Value *VTable =
1605 CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
1606
1607 // Get the offset-to-top from the vtable.
1608 OffsetToTop =
1609 CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
1610 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1611 PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1612 }
1613 // Finally, add the offset to the pointer.
1614 return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.getPointer(),
1615 OffsetToTop);
1616}
1617
1618bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1619 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1620 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1621 Call->setDoesNotReturn();
1622 CGF.Builder.CreateUnreachable();
1623 return true;
1624}
1625
/// Load the dynamic offset of a virtual base: the vbase-offset slot is
/// found at a statically known (negative) offset from the vtable's
/// address point.
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(
        CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
        "vbase.offset.ptr");

  // The slot width depends on the vtable layout: 32-bit entries in the
  // relative layout, ptrdiff_t-sized entries otherwise.
  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
        "vbase.offset");
  } else {
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}
1651
1652void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1653 // Just make sure we're in sync with TargetCXXABI.
1654 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1655
1656 // The constructor used for constructing this as a base class;
1657 // ignores virtual bases.
1658 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1659
1660 // The constructor used for constructing this as a complete class;
1661 // constructs the virtual bases, then calls the base constructor.
1662 if (!D->getParent()->isAbstract()) {
1663 // We don't need to emit the complete ctor if the class is abstract.
1664 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1665 }
1666}
1667
1669ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1671 ASTContext &Context = getContext();
1672
1673 // All parameters are already in place except VTT, which goes after 'this'.
1674 // These are Clang types, so we don't need to worry about sret yet.
1675
1676 // Check if we need to add a VTT parameter (which has type global void **).
1677 if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1678 : GD.getDtorType() == Dtor_Base) &&
1679 cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1680 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1681 QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
1682 ArgTys.insert(ArgTys.begin() + 1,
1684 return AddedStructorArgCounts::prefix(1);
1685 }
1686 return AddedStructorArgCounts{};
1687}
1688
1689void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1690 // The destructor used for destructing this as a base class; ignores
1691 // virtual bases.
1692 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1693
1694 // The destructor used for destructing this as a most-derived class;
1695 // call the base destructor and then destructs any virtual bases.
1696 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1697
1698 // The destructor in a virtual table is always a 'deleting'
1699 // destructor, which calls the complete destructor and then uses the
1700 // appropriate operator delete.
1701 if (D->isVirtual())
1702 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1703}
1704
/// Add the implicit VTT parameter decl (when required) to the IR-level
/// parameter list of a constructor or destructor, right after 'this'.
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    // The VTT is a pointer into the global address space, so qualify the
    // pointee type accordingly.
    LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
    QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
    QualType T = Context.getPointerType(Q);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamKind::CXXVTT);
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}
1726
/// Emit the ABI-specific prologue for an instance method: bind 'this',
/// load the VTT parameter if one was added, and seed the return slot for
/// this-returning functions.
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed (set up by
  /// addImplicitStructorParams for base-variant structors).
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
1753
// Compute the implicit arguments (beyond 'this') to pass to a constructor
// call: in this ABI, at most the VTT, prefixed as the second argument.
// NOTE(review): the first parameter-list line (declaring CGF, D and Type)
// is not visible in this view — confirm against the full source.
CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
    bool ForVirtualBase, bool Delegating) {
  // Constructor variants that take no VTT need no implicit arguments.
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument. Make sure to
  // correctly reflect its address space, which can differ from generic on
  // some targets.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
  QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS);
  QualType VTTTy = getContext().getPointerType(Q);
  return AddedStructorArgs::prefix({{VTT, VTTTy}});
}
1770
// Return the implicit parameter to pass to a destructor call — the VTT for
// the requested variant, or null when none is needed.
// NOTE(review): the first parameter-list line (declaring CGF, DD and Type)
// is not visible in this view — confirm against the full source.
llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
    bool ForVirtualBase, bool Delegating) {
  GlobalDecl GD(DD, Type);
  return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
}
1777
// Emit a call to the given destructor variant on 'This', passing the VTT
// implicit argument when the variant requires one.
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  // The VTT (possibly null) is passed as a pointer-to-void-pointer.
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  // NOTE(review): the declaration of 'Callee' and the kext-virtual call on
  // the 'then' branch below are on lines not visible in this view —
  // confirm against the full source.
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
  else
    // Ordinary case: call the structor symbol directly.
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
                            nullptr);
}
1798
// Emit the full definition of RD's vtable group: initializer, linkage,
// comdat, visibility, type metadata, and relative-layout fixups. Does
// nothing if the vtable already has an initializer.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  // Weak-for-linker definitions go in a comdat so duplicates fold.
  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  // NOTE(review): the final condition of this 'if' is on a line not
  // visible in this view — confirm against the full source.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  // For the relative vtable layout: strip hwasan metadata, and emit a
  // relative-vtable alias for vtables that are not dso_local.
  if (VTContext.isRelativeLayout()) {
    CGVT.RemoveHwasanMetadata(VTable);
    if (!VTable->isDSOLocal())
      CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
  }
}
1861
1862bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1863 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1864 if (Vptr.NearestVBase == nullptr)
1865 return false;
1866 return NeedsVTTParameter(CGF.CurGD);
1867}
1868
1869llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1870 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1871 const CXXRecordDecl *NearestVBase) {
1872
1873 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1874 NeedsVTTParameter(CGF.CurGD)) {
1875 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1876 NearestVBase);
1877 }
1878 return getVTableAddressPoint(Base, VTableClass);
1879}
1880
// Compute, as a constant expression, the address point inside the vtable
// group for the given base subobject of VTableClass.
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  // NOTE(review): the declaration line of 'AddressPoint' is not visible in
  // this view — confirm against the full source.
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  // Indices: outer zero, the vtable within the group, then the address
  // point within that vtable.
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, 0),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // Emit an inbounds constant GEP, with index 1 marked in-range so the
  // result is tied to the selected vtable within the group.
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}
1902
// Check whether all the non-inline virtual methods for the class have the
// specified attribute.
// NOTE(review): the function signature line following the template header
// is not visible in this view — confirm against the full source.
template <typename T>
  // Tracks whether at least one qualifying method carried the attribute.
  bool FoundNonInlineVirtualMethodWithAttr = false;
  for (const auto *D : RD->noload_decls()) {
    if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
      // Skip declarations that are not written-virtual, are inline, or
      // already have a body here.
      if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
          FD->doesThisDeclarationHaveABody())
        continue;
      // Any qualifying method missing the attribute disqualifies the class.
      if (!D->hasAttr<T>())
        return false;
      FoundNonInlineVirtualMethodWithAttr = true;
    }
  }

  // We didn't find any non-inline virtual methods missing the attribute. We
  // will return true when we found at least one non-inline virtual with the
  // attribute. (This lets our caller know that the attribute needs to be
  // propagated up to the vtable.)
  return FoundNonInlineVirtualMethodWithAttr;
}
1925
// Fetch the vtable address point for a subobject from the structor's VTT
// parameter rather than from the global vtable.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Which slot of the VTT holds the vptr for this subobject?
  uint64_t SlotIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  // Start from the incoming VTT pointer and step to that slot.
  llvm::Value *SlotPtr = CGF.LoadCXXVTT();
  if (SlotIndex)
    SlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy,
                                                     SlotPtr, SlotIndex);

  // The slot holds the address point to install.
  return CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, SlotPtr,
                                       CGF.getPointerAlign());
}
1946
1947llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1948 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1949 return getVTableAddressPoint(Base, VTableClass);
1950}
1951
1952llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1953 CharUnits VPtrOffset) {
1954 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1955
1956 llvm::GlobalVariable *&VTable = VTables[RD];
1957 if (VTable)
1958 return VTable;
1959
1960 // Queue up this vtable for possible deferred emission.
1961 CGM.addDeferredVTable(RD);
1962
1963 SmallString<256> Name;
1964 llvm::raw_svector_ostream Out(Name);
1965 getMangleContext().mangleCXXVTable(RD, Out);
1966
1967 const VTableLayout &VTLayout =
1968 CGM.getItaniumVTableContext().getVTableLayout(RD);
1969 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1970
1971 // Use pointer to global alignment for the vtable. Otherwise we would align
1972 // them based on the size of the initializer which doesn't make sense as only
1973 // single values are read.
1974 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1975 unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
1976 ? 32
1977 : CGM.getTarget().getPointerAlign(AS);
1978
1979 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1980 Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1981 getContext().toCharUnitsFromBits(PAlign).getAsAlign());
1982 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1983
1984 // In MS C++ if you have a class with virtual functions in which you are using
1985 // selective member import/export, then all virtual functions must be exported
1986 // unless they are inline, otherwise a link error will result. To match this
1987 // behavior, for such classes, we dllimport the vtable if it is defined
1988 // externally and all the non-inline virtual methods are marked dllimport, and
1989 // we dllexport the vtable if it is defined in this TU and all the non-inline
1990 // virtual methods are marked dllexport.
1991 if (CGM.getTarget().hasPS4DLLImportExport()) {
1992 if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
1993 if (CGM.getVTables().isVTableExternal(RD)) {
1994 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
1995 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1996 } else {
1997 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
1998 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1999 }
2000 }
2001 }
2002 CGM.setGVProperties(VTable, RD);
2003
2004 return VTable;
2005}
2006
// Build a CGCallee for a virtual call to GD, loading the function pointer
// from the object's vtable (with CFI checked-load, relative-layout, and
// classic-layout variants).
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  // Load the vptr out of the object.
  llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: type-checked load at byte offset = slot index * pointer width.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable, PtrTy,
        VTableIndex *
            CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
            8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout: entries are 4-byte offsets resolved via
      // llvm.load.relative.
      VFuncLoad = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
    } else {
      // Classic layout: index to the slot and load the function pointer.
      llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          PtrTy, VTable, VTableIndex, "vfn");
      VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
                                                CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    // NOTE(review): the closing line of this setMetadata call is not
    // visible in this view — confirm against the full source.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
2060
2061llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
2062 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
2063 Address This, DeleteOrMemberCallExpr E) {
2064 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2065 auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2066 assert((CE != nullptr) ^ (D != nullptr));
2067 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2068 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2069
2070 GlobalDecl GD(Dtor, DtorType);
2071 const CGFunctionInfo *FInfo =
2072 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2073 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2074 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2075
2076 QualType ThisTy;
2077 if (CE) {
2078 ThisTy = CE->getObjectType();
2079 } else {
2080 ThisTy = D->getDestroyedType();
2081 }
2082
2083 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
2084 QualType(), nullptr);
2085 return nullptr;
2086}
2087
2088void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2089 CodeGenVTables &VTables = CGM.getVTables();
2090 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2091 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2092}
2093
2094bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2095 const CXXRecordDecl *RD) const {
2096 // We don't emit available_externally vtables if we are in -fapple-kext mode
2097 // because kext mode does not permit devirtualization.
2098 if (CGM.getLangOpts().AppleKext)
2099 return false;
2100
2101 // If the vtable is hidden then it is not safe to emit an available_externally
2102 // copy of vtable.
2103 if (isVTableHidden(RD))
2104 return false;
2105
2106 if (CGM.getCodeGenOpts().ForceEmitVTables)
2107 return true;
2108
2109 // If we don't have any not emitted inline virtual function then we are safe
2110 // to emit an available_externally copy of vtable.
2111 // FIXME we can still emit a copy of the vtable if we
2112 // can emit definition of the inline functions.
2113 if (hasAnyUnusedVirtualInlineFunction(RD))
2114 return false;
2115
2116 // For a class with virtual bases, we must also be able to speculatively
2117 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2118 // the vtable" and "can emit the VTT". For a base subobject, this means we
2119 // need to be able to emit non-virtual base vtables.
2120 if (RD->getNumVBases()) {
2121 for (const auto &B : RD->bases()) {
2122 auto *BRD = B.getType()->getAsCXXRecordDecl();
2123 assert(BRD && "no class for base specifier");
2124 if (B.isVirtual() || !BRD->isDynamicClass())
2125 continue;
2126 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2127 return false;
2128 }
2129 }
2130
2131 return true;
2132}
2133
2134bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2135 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2136 return false;
2137
2138 // For a complete-object vtable (or more specifically, for the VTT), we need
2139 // to be able to speculatively emit the vtables of all dynamic virtual bases.
2140 for (const auto &B : RD->vbases()) {
2141 auto *BRD = B.getType()->getAsCXXRecordDecl();
2142 assert(BRD && "no class for base specifier");
2143 if (!BRD->isDynamicClass())
2144 continue;
2145 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2146 return false;
2147 }
2148
2149 return true;
2150}
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // Shared helper for thunk pointer adjustments: applies a non-virtual byte
  // offset and/or a virtual offset loaded through the vtable, in the order
  // dictated by IsReturnAdjustment.
  // NOTE(review): this helper's signature line and several interior lines
  // are not visible in this view — confirm against the full source.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  Address V = InitialPtr.withElementType(CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
      CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    // The virtual offset lives in the vtable at VirtualAdjustment bytes
    // from the address point, so load the vptr and index from there.
    Address VTablePtrPtr = V.withElementType(CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
      // Load the adjustment offset from the vtable as a 32-bit int.
      Offset =
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
    } else {
      llvm::Type *PtrDiffTy =

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(
        V.getElementType(), V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  return ResultPtr;
}
2205
// Adjust 'this' at the start of a thunk per the given ThisAdjustment.
// NOTE(review): the argument carrying the virtual adjustment offset
// (between TA.NonVirtual and IsReturnAdjustment) is on a line not visible
// in this view — confirm against the full source.
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  Address This,
                                                  const ThisAdjustment &TA) {
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               /*IsReturnAdjustment=*/false);
}
2213
// Adjust a covariant return value at the end of a thunk per the given
// ReturnAdjustment.
// NOTE(review): the argument carrying the virtual adjustment offset
// (between RA.NonVirtual and IsReturnAdjustment) is on a line not visible
// in this view — confirm against the full source.
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               /*IsReturnAdjustment=*/true);
}
2221
2222void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2223 RValue RV, QualType ResultType) {
2224 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2225 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2226
2227 // Destructor thunks in the ARM ABI have indeterminate results.
2228 llvm::Type *T = CGF.ReturnValue.getElementType();
2229 RValue Undef = RValue::get(llvm::UndefValue::get(T));
2230 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2231}
2232
2233/************************** Array allocation cookies **************************/
2234
CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // The cookie is a size_t element count, right-justified within space
  // padded up to the element type's preferred alignment.
  CharUnits SizeTWidth = CharUnits::fromQuantity(CGM.SizeSizeInBytes);
  CharUnits ElemAlign =
      CGM.getContext().getPreferredTypeAlignInChars(elementType);
  return std::max(SizeTWidth, ElemAlign);
}
2241
// Write the Itanium array cookie (the element count, right-justified in the
// cookie space) and return the address of the data buffer past it.
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AddrSpace = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeTWidth = CGF.getSizeSize();

  // The cookie spans a size_t padded to the element type's preferred
  // alignment.
  CharUnits FullCookie =
      std::max(SizeTWidth, Ctx.getPreferredTypeAlignInChars(ElementType));
  assert(FullCookie == getArrayCookieSizeImpl(ElementType));

  // Skip any leading padding so the count lands at the cookie's end.
  Address CountSlot = NewPtr;
  CharUnits Padding = FullCookie - SizeTWidth;
  if (!Padding.isZero())
    CountSlot = CGF.Builder.CreateConstInBoundsByteGEP(CountSlot, Padding);

  // Store the element count into its slot.
  Address CountPtr = CountSlot.withElementType(CGF.SizeTy);
  llvm::Instruction *CountStore =
      CGF.Builder.CreateStore(NumElements, CountPtr);

  // Under ASan (in address space 0), poison the cookie via the runtime and
  // keep the legitimate store above uninstrumented.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      AddrSpace == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the cookie does not need to be instrumented.
    CountStore->setNoSanitizeMetadata();
    llvm::FunctionType *PoisonTy =
        llvm::FunctionType::get(CGM.VoidTy, CountPtr.getType(), false);
    llvm::FunctionCallee Poison =
        CGM.CreateRuntimeFunction(PoisonTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(Poison, CountPtr.getPointer());
  }

  // The actual data buffer begins immediately past the cookie.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, FullCookie);
}
2286
2287llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2288 Address allocPtr,
2289 CharUnits cookieSize) {
2290 // The element size is right-justified in the cookie.
2291 Address numElementsPtr = allocPtr;
2292 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2293 if (!numElementsOffset.isZero())
2294 numElementsPtr =
2295 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2296
2297 unsigned AS = allocPtr.getAddressSpace();
2298 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
2299 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2300 return CGF.Builder.CreateLoad(numElementsPtr);
2301 // In asan mode emit a function call instead of a regular load and let the
2302 // run-time deal with it: if the shadow is properly poisoned return the
2303 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2304 // We can't simply ignore this load using nosanitize metadata because
2305 // the metadata may be lost.
2306 llvm::FunctionType *FTy =
2307 llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
2308 llvm::FunctionCallee F =
2309 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2310 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2311}
2312
CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  CharUnits TwoSizeT = CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes);
  CharUnits ElemAlign = CGM.getContext().getTypeAlignInChars(elementType);
  return std::max(TwoSizeT, ElemAlign);
}
2325
2326Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2327 Address newPtr,
2328 llvm::Value *numElements,
2329 const CXXNewExpr *expr,
2330 QualType elementType) {
2331 assert(requiresArrayCookie(expr));
2332
2333 // The cookie is always at the start of the buffer.
2334 Address cookie = newPtr;
2335
2336 // The first element is the element size.
2337 cookie = cookie.withElementType(CGF.SizeTy);
2338 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2339 getContext().getTypeSizeInChars(elementType).getQuantity());
2340 CGF.Builder.CreateStore(elementSize, cookie);
2341
2342 // The second element is the element count.
2343 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2344 CGF.Builder.CreateStore(numElements, cookie);
2345
2346 // Finally, compute a pointer to the actual data buffer by skipping
2347 // over the cookie completely.
2348 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2349 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2350}
2351
2352llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2353 Address allocPtr,
2354 CharUnits cookieSize) {
2355 // The number of elements is at offset sizeof(size_t) relative to
2356 // the allocated pointer.
2357 Address numElementsPtr
2358 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2359
2360 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
2361 return CGF.Builder.CreateLoad(numElementsPtr);
2362}
2363
2364/*********************** Static local initialization **************************/
2365
2366static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2367 llvm::PointerType *GuardPtrTy) {
2368 // int __cxa_guard_acquire(__guard *guard_object);
2369 llvm::FunctionType *FTy =
2370 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2371 GuardPtrTy, /*isVarArg=*/false);
2372 return CGM.CreateRuntimeFunction(
2373 FTy, "__cxa_guard_acquire",
2374 llvm::AttributeList::get(CGM.getLLVMContext(),
2375 llvm::AttributeList::FunctionIndex,
2376 llvm::Attribute::NoUnwind));
2377}
2378
2379static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2380 llvm::PointerType *GuardPtrTy) {
2381 // void __cxa_guard_release(__guard *guard_object);
2382 llvm::FunctionType *FTy =
2383 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2384 return CGM.CreateRuntimeFunction(
2385 FTy, "__cxa_guard_release",
2386 llvm::AttributeList::get(CGM.getLLVMContext(),
2387 llvm::AttributeList::FunctionIndex,
2388 llvm::Attribute::NoUnwind));
2389}
2390
2391static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2392 llvm::PointerType *GuardPtrTy) {
2393 // void __cxa_guard_abort(__guard *guard_object);
2394 llvm::FunctionType *FTy =
2395 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2396 return CGM.CreateRuntimeFunction(
2397 FTy, "__cxa_guard_abort",
2398 llvm::AttributeList::get(CGM.getLLVMContext(),
2399 llvm::AttributeList::FunctionIndex,
2400 llvm::Attribute::NoUnwind));
2401}
2402
2403namespace {
2404 struct CallGuardAbort final : EHScopeStack::Cleanup {
2405 llvm::GlobalVariable *Guard;
2406 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2407
2408 void Emit(CodeGenFunction &CGF, Flags flags) override {
2409 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2410 Guard);
2411 }
2412 };
2413}
2414
2415/// The ARM code here follows the Itanium code closely enough that we
2416/// just special-case it at particular places.
2417void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2418 const VarDecl &D,
2419 llvm::GlobalVariable *var,
2420 bool shouldPerformInit) {
2421 CGBuilderTy &Builder = CGF.Builder;
2422
2423 // Inline variables that weren't instantiated from variable templates have
2424 // partially-ordered initialization within their translation unit.
2425 bool NonTemplateInline =
2426 D.isInline() &&
2428
2429 // We only need to use thread-safe statics for local non-TLS variables and
2430 // inline variables; other global initialization is always single-threaded
2431 // or (through lazy dynamic loading in multiple threads) unsequenced.
2432 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2433 (D.isLocalVarDecl() || NonTemplateInline) &&
2434 !D.getTLSKind();
2435
2436 // If we have a global variable with internal linkage and thread-safe statics
2437 // are disabled, we can just let the guard variable be of type i8.
2438 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2439
2440 llvm::IntegerType *guardTy;
2441 CharUnits guardAlignment;
2442 if (useInt8GuardVariable) {
2443 guardTy = CGF.Int8Ty;
2444 guardAlignment = CharUnits::One();
2445 } else {
2446 // Guard variables are 64 bits in the generic ABI and size width on ARM
2447 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2448 if (UseARMGuardVarABI) {
2449 guardTy = CGF.SizeTy;
2450 guardAlignment = CGF.getSizeAlign();
2451 } else {
2452 guardTy = CGF.Int64Ty;
2453 guardAlignment =
2454 CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
2455 }
2456 }
2457 llvm::PointerType *guardPtrTy = llvm::PointerType::get(
2458 CGF.CGM.getLLVMContext(),
2459 CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
2460
2461 // Create the guard variable if we don't already have it (as we
2462 // might if we're double-emitting this function body).
2463 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2464 if (!guard) {
2465 // Mangle the name for the guard.
2466 SmallString<256> guardName;
2467 {
2468 llvm::raw_svector_ostream out(guardName);
2469 getMangleContext().mangleStaticGuardVariable(&D, out);
2470 }
2471
2472 // Create the guard variable with a zero-initializer.
2473 // Just absorb linkage, visibility and dll storage class from the guarded
2474 // variable.
2475 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2476 false, var->getLinkage(),
2477 llvm::ConstantInt::get(guardTy, 0),
2478 guardName.str());
2479 guard->setDSOLocal(var->isDSOLocal());
2480 guard->setVisibility(var->getVisibility());
2481 guard->setDLLStorageClass(var->getDLLStorageClass());
2482 // If the variable is thread-local, so is its guard variable.
2483 guard->setThreadLocalMode(var->getThreadLocalMode());
2484 guard->setAlignment(guardAlignment.getAsAlign());
2485
2486 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2487 // group as the associated data object." In practice, this doesn't work for
2488 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2489 llvm::Comdat *C = var->getComdat();
2490 if (!D.isLocalVarDecl() && C &&
2491 (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2492 CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2493 guard->setComdat(C);
2494 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2495 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2496 }
2497
2498 CGM.setStaticLocalDeclGuardAddress(&D, guard);
2499 }
2500
2501 Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
2502
2503 // Test whether the variable has completed initialization.
2504 //
2505 // Itanium C++ ABI 3.3.2:
2506 // The following is pseudo-code showing how these functions can be used:
2507 // if (obj_guard.first_byte == 0) {
2508 // if ( __cxa_guard_acquire (&obj_guard) ) {
2509 // try {
2510 // ... initialize the object ...;
2511 // } catch (...) {
2512 // __cxa_guard_abort (&obj_guard);
2513 // throw;
2514 // }
2515 // ... queue object destructor with __cxa_atexit() ...;
2516 // __cxa_guard_release (&obj_guard);
2517 // }
2518 // }
2519 //
2520 // If threadsafe statics are enabled, but we don't have inline atomics, just
2521 // call __cxa_guard_acquire unconditionally. The "inline" check isn't
2522 // actually inline, and the user might not expect calls to __atomic libcalls.
2523
2524 unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
2525 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2526 if (!threadsafe || MaxInlineWidthInBits) {
2527 // Load the first byte of the guard variable.
2528 llvm::LoadInst *LI =
2529 Builder.CreateLoad(guardAddr.withElementType(CGM.Int8Ty));
2530
2531 // Itanium ABI:
2532 // An implementation supporting thread-safety on multiprocessor
2533 // systems must also guarantee that references to the initialized
2534 // object do not occur before the load of the initialization flag.
2535 //
2536 // In LLVM, we do this by marking the load Acquire.
2537 if (threadsafe)
2538 LI->setAtomic(llvm::AtomicOrdering::Acquire);
2539
2540 // For ARM, we should only check the first bit, rather than the entire byte:
2541 //
2542 // ARM C++ ABI 3.2.3.1:
2543 // To support the potential use of initialization guard variables
2544 // as semaphores that are the target of ARM SWP and LDREX/STREX
2545 // synchronizing instructions we define a static initialization
2546 // guard variable to be a 4-byte aligned, 4-byte word with the
2547 // following inline access protocol.
2548 // #define INITIALIZED 1
2549 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2550 // if (__cxa_guard_acquire(&obj_guard))
2551 // ...
2552 // }
2553 //
2554 // and similarly for ARM64:
2555 //
2556 // ARM64 C++ ABI 3.2.2:
2557 // This ABI instead only specifies the value bit 0 of the static guard
2558 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2559 // variable is not initialized and 1 when it is.
2560 llvm::Value *V =
2561 (UseARMGuardVarABI && !useInt8GuardVariable)
2562 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2563 : LI;
2564 llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2565
2566 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2567
2568 // Check if the first byte of the guard variable is zero.
2569 CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2570 CodeGenFunction::GuardKind::VariableGuard, &D);
2571
2572 CGF.EmitBlock(InitCheckBlock);
2573 }
2574
2575 // The semantics of dynamic initialization of variables with static or thread
2576 // storage duration depends on whether they are declared at block-scope. The
2577 // initialization of such variables at block-scope can be aborted with an
2578 // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
2579 // to their initialization has undefined behavior (also per C++20
2580 // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
2581 // lead to termination (per C++20 [except.terminate]p1), and recursive
2582 // references to the variables are governed only by the lifetime rules (per
2583 // C++20 [class.cdtor]p2), which means such references are perfectly fine as
2584 // long as they avoid touching memory. As a result, block-scope variables must
2585 // not be marked as initialized until after initialization completes (unless
2586 // the mark is reverted following an exception), but non-block-scope variables
2587 // must be marked prior to initialization so that recursive accesses during
2588 // initialization do not restart initialization.
2589
2590 // Variables used when coping with thread-safe statics and exceptions.
2591 if (threadsafe) {
2592 // Call __cxa_guard_acquire.
2593 llvm::Value *V
2594 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2595
2596 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2597
2598 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2599 InitBlock, EndBlock);
2600
2601 // Call __cxa_guard_abort along the exceptional edge.
2602 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2603
2604 CGF.EmitBlock(InitBlock);
2605 } else if (!D.isLocalVarDecl()) {
2606 // For non-local variables, store 1 into the first byte of the guard
2607 // variable before the object initialization begins so that references
2608 // to the variable during initialization don't restart initialization.
2609 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2610 guardAddr.withElementType(CGM.Int8Ty));
2611 }
2612
2613 // Emit the initializer and add a global destructor if appropriate.
2614 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2615
2616 if (threadsafe) {
2617 // Pop the guard-abort cleanup if we pushed one.
2618 CGF.PopCleanupBlock();
2619
2620 // Call __cxa_guard_release. This cannot throw.
2621 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2622 guardAddr.getPointer());
2623 } else if (D.isLocalVarDecl()) {
2624 // For local variables, store 1 into the first byte of the guard variable
2625 // after the object initialization completes so that initialization is
2626 // retried if initialization is interrupted by an exception.
2627 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2628 guardAddr.withElementType(CGM.Int8Ty));
2629 }
2630
2631 CGF.EmitBlock(EndBlock);
2632}
2633
2634/// Register a global destructor using __cxa_atexit.
2636 llvm::FunctionCallee dtor,
2637 llvm::Constant *addr, bool TLS) {
2638 assert(!CGF.getTarget().getTriple().isOSAIX() &&
2639 "unexpected call to emitGlobalDtorWithCXAAtExit");
2640 assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2641 "__cxa_atexit is disabled");
2642 const char *Name = "__cxa_atexit";
2643 if (TLS) {
2644 const llvm::Triple &T = CGF.getTarget().getTriple();
2645 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2646 }
2647
2648 // We're assuming that the destructor function is something we can
2649 // reasonably call with the default CC.
2650 llvm::Type *dtorTy = CGF.UnqualPtrTy;
2651
2652 // Preserve address space of addr.
2653 auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2654 auto AddrPtrTy = AddrAS ? llvm::PointerType::get(CGF.getLLVMContext(), AddrAS)
2655 : CGF.Int8PtrTy;
2656
2657 // Create a variable that binds the atexit to this shared object.
2658 llvm::Constant *handle =
2659 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2660 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2661 GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2662
2663 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2664 llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
2665 llvm::FunctionType *atexitTy =
2666 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2667
2668 // Fetch the actual function.
2669 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2670 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2671 fn->setDoesNotThrow();
2672
2673 if (!addr)
2674 // addr is null when we are trying to register a dtor annotated with
2675 // __attribute__((destructor)) in a constructor function. Using null here is
2676 // okay because this argument is just passed back to the destructor
2677 // function.
2678 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2679
2680 llvm::Value *args[] = {dtor.getCallee(), addr, handle};
2681 CGF.EmitNounwindRuntimeCall(atexit, args);
2682}
2683
2685 StringRef FnName) {
2686 // Create a function that registers/unregisters destructors that have the same
2687 // priority.
2688 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2689 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2690 FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2691
2692 return GlobalInitOrCleanupFn;
2693}
2694
// Emit, for each destructor priority, a __GLOBAL_cleanup_<prio> function that
// walks that priority's destructors in reverse registration order, calls
// unatexit() for each, and directly invokes any destructor that unatexit
// reports as still pending (return value 0). Each cleanup function is then
// registered as a global dtor at its priority.
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    // NOTE(review): the trailing arguments of this StartFunction call
    // (original lines 2707-2709) were lost in extraction — restore from
    // upstream before building.
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.
      // unatexit returns 0 when the dtor was still registered (and is now
      // unregistered), meaning we must run it ourselves.
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}
2750
// Emit, for each destructor priority, a __GLOBAL_init_<prio> constructor
// function that registers that priority's destructors via __cxa_atexit (or
// plain atexit as a fallback). On sinit/sterm targets (AIX) the matching
// unatexit-based cleanup functions are emitted afterwards.
void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);

    CodeGenFunction CGF(*this);
    // NOTE(review): the trailing arguments of this StartFunction call
    // (original lines 2762-2764) were lost in extraction — restore from
    // upstream before building.
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      } else {
        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.
        // NOTE(review): the atexit-registration call in this branch
        // (original line 2779) was lost in extraction — restore from
        // upstream.
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority);
  }

  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}
2790
2791/// Register a global destructor as best as we know how.
2792void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2793 llvm::FunctionCallee dtor,
2794 llvm::Constant *addr) {
2795 if (D.isNoDestroy(CGM.getContext()))
2796 return;
2797
2798 // OpenMP offloading supports C++ constructors and destructors but we do not
2799 // always have 'atexit' available. Instead lower these to use the LLVM global
2800 // destructors which we can handle directly in the runtime. Note that this is
2801 // not strictly 1-to-1 with using `atexit` because we no longer tear down
2802 // globals in reverse order of when they were constructed.
2803 if (!CGM.getLangOpts().hasAtExit() && !D.isStaticLocal())
2804 return CGF.registerGlobalDtorWithLLVM(D, dtor, addr);
2805
2806 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2807 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2808 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2809 // We can always use __cxa_thread_atexit.
2810 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2811 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2812
2813 // In Apple kexts, we want to add a global destructor entry.
2814 // FIXME: shouldn't this be guarded by some variable?
2815 if (CGM.getLangOpts().AppleKext) {
2816 // Generate a global destructor entry.
2817 return CGM.AddCXXDtorEntry(dtor, addr);
2818 }
2819
2820 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2821}
2822
2825 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2826 // Darwin prefers to have references to thread local variables to go through
2827 // the thread wrapper instead of directly referencing the backing variable.
2828 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2829 CGM.getTarget().getTriple().isOSDarwin();
2830}
2831
2832/// Get the appropriate linkage for the wrapper function. This is essentially
2833/// the weak form of the variable's linkage; every translation unit which needs
2834/// the wrapper emits a copy, and we want the linker to merge them.
2835static llvm::GlobalValue::LinkageTypes
2837 llvm::GlobalValue::LinkageTypes VarLinkage =
2839
2840 // For internal linkage variables, we don't need an external or weak wrapper.
2841 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2842 return VarLinkage;
2843
2844 // If the thread wrapper is replaceable, give it appropriate linkage.
2845 if (isThreadWrapperReplaceable(VD, CGM))
2846 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2847 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2848 return VarLinkage;
2849 return llvm::GlobalValue::WeakODRLinkage;
2850}
2851
2852llvm::Function *
2853ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2854 llvm::Value *Val) {
2855 // Mangle the name for the thread_local wrapper function.
2856 SmallString<256> WrapperName;
2857 {
2858 llvm::raw_svector_ostream Out(WrapperName);
2859 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2860 }
2861
2862 // FIXME: If VD is a definition, we should regenerate the function attributes
2863 // before returning.
2864 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2865 return cast<llvm::Function>(V);
2866
2867 QualType RetQT = VD->getType();
2868 if (RetQT->isReferenceType())
2869 RetQT = RetQT.getNonReferenceType();
2870
2871 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2872 getContext().getPointerType(RetQT), FunctionArgList());
2873
2874 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2875 llvm::Function *Wrapper =
2876 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2877 WrapperName.str(), &CGM.getModule());
2878
2879 if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
2880 Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
2881
2882 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);
2883
2884 // Always resolve references to the wrapper at link time.
2885 if (!Wrapper->hasLocalLinkage())
2886 if (!isThreadWrapperReplaceable(VD, CGM) ||
2887 llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2888 llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2890 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2891
2892 if (isThreadWrapperReplaceable(VD, CGM)) {
2893 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2894 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2895 }
2896
2897 ThreadWrappers.push_back({VD, Wrapper});
2898 return Wrapper;
2899}
2900
// Emit the per-TU thread_local machinery: a guarded __tls_init function for
// ordered initializers, and a body for every referenced thread wrapper that
// (if needed) calls the variable's init routine and returns the variable's
// address.
//
// NOTE(review): several hyperlinked lines were lost in extraction from this
// function (original lines 2909, 2912, 2924, 2926, 2938, 3001, 3014-3015,
// 3044-3045); each gap is flagged inline — restore from upstream.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  // NOTE(review): the declaration of the ordered-inits container (original
  // line 2909) was lost in extraction.
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // NOTE(review): the opening of this condition (original line 2912) was
    // lost in extraction.
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    // NOTE(review): the line defining FI (original line 2924) and a
    // continuation line of this call (2926) were lost in extraction.
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     /*TLS=*/true);
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    // NOTE(review): the call that generates the guarded-init body (original
    // line 2938) was lost in extraction.
        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper. This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // NOTE(review): the condition guarding this lookup (original line 3001)
      // was lost in extraction — presumably a template-instantiation check;
      // verify against upstream.
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      // NOTE(review): two lines (original 3014-3015) defining FI and opening
      // this attribute-setting call were lost in extraction.
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols. However,
    // other TUs will not know whether the initialization routine exists
    // so create an empty, init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, true) &&
        !mayNeedDestruction(VD)) {
      // Init should be null. If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
      // NOTE(review): two lines (original 3044-3045) opening an
      // attribute-setting call were lost in extraction.
          cast<llvm::Function>(Func),
          /*IsThunk=*/false);
      // Create a function body that just returns
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, except if constinit and also neither of class type or of
      // (possibly multi-dimensional) array of class type, thread_local vars
      // will have init routines regardless of whether they are
      // const-initialized. Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existence. This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(InitFnTy, Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Builder.CreateThreadLocalAddress(Var);

    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align);
    }

    Builder.CreateRet(Val);
  }
}
3103
3104LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3105 const VarDecl *VD,
3106 QualType LValType) {
3107 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3108 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3109
3110 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3111 CallVal->setCallingConv(Wrapper->getCallingConv());
3112
3113 LValue LV;
3114 if (VD->getType()->isReferenceType())
3115 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3116 else
3117 LV = CGF.MakeAddrLValue(CallVal, LValType,
3118 CGF.getContext().getDeclAlign(VD));
3119 // FIXME: need setObjCGCLValueClass?
3120 return LV;
3121}
3122
3123/// Return whether the given global decl needs a VTT parameter, which it does
3124/// if it's a base constructor or destructor with virtual bases.
3125bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3126 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3127
3128 // We don't have any virtual bases, just return early.
3129 if (!MD->getParent()->getNumVBases())
3130 return false;
3131
3132 // Check if we have a base constructor.
3133 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3134 return true;
3135
3136 // Check if we have a base destructor.
3137 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3138 return true;
3139
3140 return false;
3141}
3142
namespace {
/// ItaniumRTTIBuilder - Builds Itanium-ABI RTTI descriptors (type_info
/// structures) for types and emits them as module globals.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  // NOTE(review): the declaration of the Fields member (original line 3150)
  // was lost in extraction — restore from upstream.

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
} // namespace
3243
3244llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3245 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3246 SmallString<256> Name;
3247 llvm::raw_svector_ostream Out(Name);
3249
3250 // We know that the mangled name of the type starts at index 4 of the
3251 // mangled name of the typename, so we can just index into it in order to
3252 // get the mangled name of the type.
3253 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3254 Name.substr(4));
3255 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3256
3257 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3258 Name, Init->getType(), Linkage, Align.getAsAlign());
3259
3260 GV->setInitializer(Init);
3261
3262 return GV;
3263}
3264
3265llvm::Constant *
3266ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3267 // Mangle the RTTI name.
3268 SmallString<256> Name;
3269 llvm::raw_svector_ostream Out(Name);
3270 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3271
3272 // Look for an existing global.
3273 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3274
3275 if (!GV) {
3276 // Create a new global variable.
3277 // Note for the future: If we would ever like to do deferred emission of
3278 // RTTI, check if emitting vtables opportunistically need any adjustment.
3279
3280 GV = new llvm::GlobalVariable(
3281 CGM.getModule(), CGM.GlobalsInt8PtrTy,
3282 /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
3283 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3284 CGM.setGVProperties(GV, RD);
3285 // Import the typeinfo symbol when all non-inline virtual methods are
3286 // imported.
3287 if (CGM.getTarget().hasPS4DLLImportExport()) {
3288 if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3289 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3290 CGM.setDSOLocal(GV);
3291 }
3292 }
3293 }
3294
3295 return GV;
3296}
3297
3298/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3299/// info for that type is defined in the standard library.
// NOTE(review): the signature line is not visible in this listing; the overload
// below and the callers indicate it takes a const BuiltinType *Ty — confirm in
// the full file.
3301 // Itanium C++ ABI 2.9.2:
3302 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
3303 // the run-time support library. Specifically, the run-time support
3304 // library should contain type_info objects for the types X, X* and
3305 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3306 // unsigned char, signed char, short, unsigned short, int, unsigned int,
3307 // long, unsigned long, long long, unsigned long long, float, double,
3308 // long double, char16_t, char32_t, and the IEEE 754r decimal and
3309 // half-precision floating point types.
3310 //
3311 // GCC also emits RTTI for __int128.
3312 // FIXME: We do not emit RTTI information for decimal types here.
3313
3314 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3315 switch (Ty->getKind()) {
// Fundamental types: the C++ runtime library ships type_info objects for
// all of these, so we report true and let the runtime provide them.
3316 case BuiltinType::Void:
3317 case BuiltinType::NullPtr:
3318 case BuiltinType::Bool:
3319 case BuiltinType::WChar_S:
3320 case BuiltinType::WChar_U:
3321 case BuiltinType::Char_U:
3322 case BuiltinType::Char_S:
3323 case BuiltinType::UChar:
3324 case BuiltinType::SChar:
3325 case BuiltinType::Short:
3326 case BuiltinType::UShort:
3327 case BuiltinType::Int:
3328 case BuiltinType::UInt:
3329 case BuiltinType::Long:
3330 case BuiltinType::ULong:
3331 case BuiltinType::LongLong:
3332 case BuiltinType::ULongLong:
3333 case BuiltinType::Half:
3334 case BuiltinType::Float:
3335 case BuiltinType::Double:
3336 case BuiltinType::LongDouble:
3337 case BuiltinType::Float16:
3338 case BuiltinType::Float128:
3339 case BuiltinType::Ibm128:
3340 case BuiltinType::Char8:
3341 case BuiltinType::Char16:
3342 case BuiltinType::Char32:
3343 case BuiltinType::Int128:
3344 case BuiltinType::UInt128:
3345 return true;
3346
// Vendor/extension builtin types (OpenCL, SVE, PPC vectors, RISC-V vectors,
// WebAssembly references, fixed-point, bfloat16): the runtime provides no
// type_info for these, so report false.
3347#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3348 case BuiltinType::Id:
3349#include "clang/Basic/OpenCLImageTypes.def"
3350#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3351 case BuiltinType::Id:
3352#include "clang/Basic/OpenCLExtensionTypes.def"
3353 case BuiltinType::OCLSampler:
3354 case BuiltinType::OCLEvent:
3355 case BuiltinType::OCLClkEvent:
3356 case BuiltinType::OCLQueue:
3357 case BuiltinType::OCLReserveID:
3358#define SVE_TYPE(Name, Id, SingletonId) \
3359 case BuiltinType::Id:
3360#include "clang/Basic/AArch64SVEACLETypes.def"
3361#define PPC_VECTOR_TYPE(Name, Id, Size) \
3362 case BuiltinType::Id:
3363#include "clang/Basic/PPCTypes.def"
3364#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3365#include "clang/Basic/RISCVVTypes.def"
3366#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3367#include "clang/Basic/WebAssemblyReferenceTypes.def"
3368 case BuiltinType::ShortAccum:
3369 case BuiltinType::Accum:
3370 case BuiltinType::LongAccum:
3371 case BuiltinType::UShortAccum:
3372 case BuiltinType::UAccum:
3373 case BuiltinType::ULongAccum:
3374 case BuiltinType::ShortFract:
3375 case BuiltinType::Fract:
3376 case BuiltinType::LongFract:
3377 case BuiltinType::UShortFract:
3378 case BuiltinType::UFract:
3379 case BuiltinType::ULongFract:
3380 case BuiltinType::SatShortAccum:
3381 case BuiltinType::SatAccum:
3382 case BuiltinType::SatLongAccum:
3383 case BuiltinType::SatUShortAccum:
3384 case BuiltinType::SatUAccum:
3385 case BuiltinType::SatULongAccum:
3386 case BuiltinType::SatShortFract:
3387 case BuiltinType::SatFract:
3388 case BuiltinType::SatLongFract:
3389 case BuiltinType::SatUShortFract:
3390 case BuiltinType::SatUFract:
3391 case BuiltinType::SatULongFract:
3392 case BuiltinType::BFloat16:
3393 return false;
3394
// Dependent and placeholder types should have been resolved before RTTI
// emission; reaching here indicates a bug in the caller.
3395 case BuiltinType::Dependent:
3396#define BUILTIN_TYPE(Id, SingletonId)
3397#define PLACEHOLDER_TYPE(Id, SingletonId) \
3398 case BuiltinType::Id:
3399#include "clang/AST/BuiltinTypes.def"
3400 llvm_unreachable("asking for RRTI for a placeholder type!");
3401
3402 case BuiltinType::ObjCId:
3403 case BuiltinType::ObjCClass:
3404 case BuiltinType::ObjCSel:
3405 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3406 }
3407
3408 llvm_unreachable("Invalid BuiltinType Kind!");
3409}
3410
3411static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3412 QualType PointeeTy = PointerTy->getPointeeType();
3413 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3414 if (!BuiltinTy)
3415 return false;
3416
3417 // Check the qualifiers.
3418 Qualifiers Quals = PointeeTy.getQualifiers();
3419 Quals.removeConst();
3420
3421 if (!Quals.empty())
3422 return false;
3423
3424 return TypeInfoIsInStandardLibrary(BuiltinTy);
3425}
3426
3427/// IsStandardLibraryRTTIDescriptor - Returns whether the type
3428/// information for the given type exists in the standard library.
// NOTE(review): the declaration line is elided in this listing; the body reads
// a single parameter Ty (a QualType) — confirm in the full file.
3430 // Type info for builtin types is defined in the standard library.
3431 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3432 return TypeInfoIsInStandardLibrary(BuiltinTy);
3433
3434 // Type info for some pointer types to builtin types is defined in the
3435 // standard library.
3436 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3437 return TypeInfoIsInStandardLibrary(PointerTy);
3438
// Everything else (classes, enums, functions, ...) must be emitted by the
// compiler rather than provided by the runtime.
3439 return false;
3440}
3441
3442/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3443/// the given type exists somewhere else, and that we should not emit the type
3444/// information in this translation unit. Assumes that it is not a
3445/// standard-library type.
// NOTE(review): the first line of the signature (taking CodeGenModule &CGM) is
// elided in this listing.
3447 QualType Ty) {
3448 ASTContext &Context = CGM.getContext();
3449
3450 // If RTTI is disabled, assume it might be disabled in the
3451 // translation unit that defines any potential key function, too.
3452 if (!Context.getLangOpts().RTTI) return false;
3453
3454 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3455 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3456 if (!RD->hasDefinition())
3457 return false;
3458
3459 if (!RD->isDynamicClass())
3460 return false;
3461
3462 // FIXME: this may need to be reconsidered if the key function
3463 // changes.
3464 // N.B. We must always emit the RTTI data ourselves if there exists a key
3465 // function.
3466 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3467
3468 // Don't import the RTTI but emit it locally.
3469 if (CGM.getTriple().isWindowsGNUEnvironment())
3470 return false;
3471
3472 if (CGM.getVTables().isVTableExternal(RD)) {
3473 if (CGM.getTarget().hasPS4DLLImportExport())
3474 return true;
3475
// External vtable => use the external RTTI descriptor, *except* for a
// dllimport class outside the Windows-Itanium environment, which must have
// its RTTI emitted locally (the ternary yields false exactly in that case).
3476 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3477 ? false
3478 : true;
3479 }
3480 if (IsDLLImport)
3481 return true;
3482 }
3483
3484 return false;
3485}
3486
3487/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3488static bool IsIncompleteClassType(const RecordType *RecordTy) {
3489 return !RecordTy->getDecl()->isCompleteDefinition();
3490}
3491
3492/// ContainsIncompleteClassType - Returns whether the given type contains an
3493/// incomplete class type. This is true if
3494///
3495/// * The given type is an incomplete class type.
3496/// * The given type is a pointer type whose pointee type contains an
3497/// incomplete class type.
3498/// * The given type is a member pointer type whose class is an incomplete
3499/// class type.
3500/// * The given type is a member pointer type whose pointee type contains an
3501/// incomplete class type.
3502/// is an indirect or direct pointer to an incomplete class type.
// NOTE(review): the signature line is elided in this listing; the body and the
// recursive calls indicate a single QualType Ty parameter.
3504 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3505 if (IsIncompleteClassType(RecordTy))
3506 return true;
3507 }
3508
// Recurse through the pointee: a pointer to an incomplete type (at any
// depth) also counts as containing an incomplete class type.
3509 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3510 return ContainsIncompleteClassType(PointerTy->getPointeeType());
3511
3512 if (const MemberPointerType *MemberPointerTy =
3513 dyn_cast<MemberPointerType>(Ty)) {
3514 // Check if the class type is incomplete.
3515 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3516 if (IsIncompleteClassType(ClassType))
3517 return true;
3518
3519 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3520 }
3521
3522 return false;
3523}
3524
3525// CanUseSingleInheritance - Return whether the given record decl has a "single,
3526// public, non-virtual base at offset zero (i.e. the derived class is dynamic
3527// iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
// Classes satisfying this predicate may use the cheaper
// abi::__si_class_type_info instead of abi::__vmi_class_type_info.
3529 // Check the number of bases.
3530 if (RD->getNumBases() != 1)
3531 return false;
3532
3533 // Get the base.
// NOTE(review): the line declaring Base (pointing at RD's single base
// specifier) is elided in this listing.
3535
3536 // Check that the base is not virtual.
3537 if (Base->isVirtual())
3538 return false;
3539
3540 // Check that the base is public.
3541 if (Base->getAccessSpecifier() != AS_public)
3542 return false;
3543
3544 // Check that the class is dynamic iff the base is.
// An empty base is placed at offset zero regardless of dynamic-ness, so it
// is exempt from the "dynamic iff" requirement.
3545 auto *BaseDecl =
3546 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3547 if (!BaseDecl->isEmpty() &&
3548 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3549 return false;
3550
3551 return true;
3552}
3553
// Push the first field of the type_info object being built: a pointer into
// the vtable of the appropriate abi::__*_type_info class (selected from the
// dynamic type class of Ty).
3554void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3555 // abi::__class_type_info.
3556 static const char * const ClassTypeInfo =
3557 "_ZTVN10__cxxabiv117__class_type_infoE";
3558 // abi::__si_class_type_info.
3559 static const char * const SIClassTypeInfo =
3560 "_ZTVN10__cxxabiv120__si_class_type_infoE";
3561 // abi::__vmi_class_type_info.
3562 static const char * const VMIClassTypeInfo =
3563 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3564
3565 const char *VTableName = nullptr;
3566
3567 switch (Ty->getTypeClass()) {
3568#define TYPE(Class, Base)
3569#define ABSTRACT_TYPE(Class, Base)
3570#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3571#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3572#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3573#include "clang/AST/TypeNodes.inc"
3574 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3575
3576 case Type::LValueReference:
3577 case Type::RValueReference:
3578 llvm_unreachable("References shouldn't get here");
3579
3580 case Type::Auto:
3581 case Type::DeducedTemplateSpecialization:
3582 llvm_unreachable("Undeduced type shouldn't get here");
3583
3584 case Type::Pipe:
3585 llvm_unreachable("Pipe types shouldn't get here");
3586
3587 case Type::Builtin:
3588 case Type::BitInt:
3589 // GCC treats vector and complex types as fundamental types.
3590 case Type::Vector:
3591 case Type::ExtVector:
3592 case Type::ConstantMatrix:
3593 case Type::Complex:
3594 case Type::Atomic:
3595 // FIXME: GCC treats block pointers as fundamental types?!
3596 case Type::BlockPointer:
3597 // abi::__fundamental_type_info.
3598 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3599 break;
3600
3601 case Type::ConstantArray:
3602 case Type::IncompleteArray:
3603 case Type::VariableArray:
3604 // abi::__array_type_info.
3605 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3606 break;
3607
3608 case Type::FunctionNoProto:
3609 case Type::FunctionProto:
3610 // abi::__function_type_info.
3611 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3612 break;
3613
3614 case Type::Enum:
3615 // abi::__enum_type_info.
3616 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3617 break;
3618
3619 case Type::Record: {
3620 const CXXRecordDecl *RD =
3621 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3622
// No definition / no bases => plain __class_type_info; one well-behaved
// base => __si_class_type_info; anything else => __vmi_class_type_info.
3623 if (!RD->hasDefinition() || !RD->getNumBases()) {
3624 VTableName = ClassTypeInfo;
3625 } else if (CanUseSingleInheritance(RD)) {
3626 VTableName = SIClassTypeInfo;
3627 } else {
3628 VTableName = VMIClassTypeInfo;
3629 }
3630
3631 break;
3632 }
3633
3634 case Type::ObjCObject:
3635 // Ignore protocol qualifiers.
3636 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3637
3638 // Handle id and Class.
3639 if (isa<BuiltinType>(Ty)) {
3640 VTableName = ClassTypeInfo;
3641 break;
3642 }
3643
3644 assert(isa<ObjCInterfaceType>(Ty));
3645 [[fallthrough]];
3646
3647 case Type::ObjCInterface:
3648 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3649 VTableName = SIClassTypeInfo;
3650 } else {
3651 VTableName = ClassTypeInfo;
3652 }
3653 break;
3654
3655 case Type::ObjCObjectPointer:
3656 case Type::Pointer:
3657 // abi::__pointer_type_info.
3658 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3659 break;
3660
3661 case Type::MemberPointer:
3662 // abi::__pointer_to_member_type_info.
3663 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3664 break;
3665 }
3666
3667 llvm::Constant *VTable = nullptr;
3668
3669 // Check if the alias exists. If it doesn't, then get or create the global.
// NOTE(review): a guard line (upstream 3670) is elided in this listing; the
// alias lookup appears to be conditional — confirm against the full file.
3671 VTable = CGM.getModule().getNamedAlias(VTableName);
3672 if (!VTable) {
3673 llvm::Type *Ty = llvm::ArrayType::get(CGM.GlobalsInt8PtrTy, 0);
3674 VTable = CGM.getModule().getOrInsertGlobal(VTableName, Ty);
3675 }
3676
3677 CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3678
3679 llvm::Type *PtrDiffTy =
// NOTE(review): the initializer for PtrDiffTy (upstream 3680) and the branch
// condition before the relative-layout case (upstream 3683) are elided here.
3681
3682 // The vtable address point is 2.
3684 // The vtable address point is 8 bytes after its start:
3685 // 4 for the offset to top + 4 for the relative offset to rtti.
3686 llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3687 VTable =
3688 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3689 } else {
// Classic layout: skip the two pointer-width header slots (offset-to-top and
// the type_info pointer) to reach the vtable's address point.
3690 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3691 VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.GlobalsInt8PtrTy,
3692 VTable, Two);
3693 }
3694
3695 Fields.push_back(VTable);
3696}
3697
3698/// Return the linkage that the type info and type info name constants
3699/// should have for the given type.
3700static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3701 QualType Ty) {
3702 // Itanium C++ ABI 2.9.5p7:
3703 // In addition, it and all of the intermediate abi::__pointer_type_info
3704 // structs in the chain down to the abi::__class_type_info for the
3705 // incomplete class type must be prevented from resolving to the
3706 // corresponding type_info structs for the complete class type, possibly
3707 // by making them local static objects. Finally, a dummy class RTTI is
3708 // generated for the incomplete type that will not resolve to the final
3709 // complete class RTTI (because the latter need not exist), possibly by
3710 // making it a local static object.
// NOTE(review): the guard condition for this early return (upstream 3711) is
// elided in this listing; per the ABI comment above it should fire when Ty
// contains an incomplete class type — confirm against the full file.
3712 return llvm::GlobalValue::InternalLinkage;
3714 switch (Ty->getLinkage()) {
3715 case Linkage::Invalid:
3716 llvm_unreachable("Linkage hasn't been computed!");
3717
// NOTE(review): additional Linkage case labels (upstream 3720 and 3723) are
// elided from the two groups below.
3718 case Linkage::None:
3719 case Linkage::Internal:
3721 return llvm::GlobalValue::InternalLinkage;
3724 case Linkage::Module:
3725 case Linkage::External:
3726 // RTTI is not enabled, which means that this type info struct is going
3727 // to be used for exception handling. Give it linkonce_odr linkage.
3728 if (!CGM.getLangOpts().RTTI)
3729 return llvm::GlobalValue::LinkOnceODRLinkage;
3730
3731 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3732 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3733 if (RD->hasAttr<WeakAttr>())
3734 return llvm::GlobalValue::WeakODRLinkage;
3735 if (CGM.getTriple().isWindowsItaniumEnvironment())
3736 if (RD->hasAttr<DLLImportAttr>() &&
// NOTE(review): the second operand of this && (upstream 3737) is elided.
3738 return llvm::GlobalValue::ExternalLinkage;
3739 // MinGW always uses LinkOnceODRLinkage for type info.
3740 if (RD->isDynamicClass() &&
3741 !CGM.getContext()
3742 .getTargetInfo()
3743 .getTriple()
3744 .isWindowsGNUEnvironment())
// Dynamic classes share the vtable's linkage so the RTTI and vtable are
// emitted consistently.
3745 return CGM.getVTableLinkage(RD);
3746 }
3747
3748 return llvm::GlobalValue::LinkOnceODRLinkage;
3749 }
3750
3751 llvm_unreachable("Invalid linkage!");
3752}
3753
// Entry point for building (or reusing) the type_info global for Ty.
// Computes linkage, visibility and DLL storage class, then delegates to the
// four-argument overload that actually emits the object.
3754llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3755 // We want to operate on the canonical type.
3756 Ty = Ty.getCanonicalType();
3757
3758 // Check if we've already emitted an RTTI descriptor for this type.
3759 SmallString<256> Name;
3760 llvm::raw_svector_ostream Out(Name);
3761 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3762
3763 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3764 if (OldGV && !OldGV->isDeclaration()) {
3765 assert(!OldGV->hasAvailableExternallyLinkage() &&
3766 "available_externally typeinfos not yet implemented");
3767
3768 return OldGV;
3769 }
3770
3771 // Check if there is already an external RTTI descriptor for this type.
// NOTE(review): the condition lines for this early return (upstream
// 3772-3773) are elided in this listing; per the comment above they test
// whether the descriptor is provided externally — confirm in the full file.
3774 return GetAddrOfExternalRTTIDescriptor(Ty);
3775
3776 // Emit the standard library with external linkage.
3777 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3778
3779 // Give the type_info object and name the formal visibility of the
3780 // type itself.
3781 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3782 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3783 // If the linkage is local, only default visibility makes sense.
3784 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3785 else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3786 ItaniumCXXABI::RUK_NonUniqueHidden)
3787 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3788 else
3789 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3790
3791 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3792 llvm::GlobalValue::DefaultStorageClass;
3793 if (auto RD = Ty->getAsCXXRecordDecl()) {
3794 if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
3795 RD->hasAttr<DLLExportAttr>()) ||
// NOTE(review): one operand of this condition (upstream 3796) is elided.
3797 !llvm::GlobalValue::isLocalLinkage(Linkage) &&
3798 llvmVisibility == llvm::GlobalValue::DefaultVisibility))
3799 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3800 }
3801 return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3802}
3803
// Worker overload: emits the type_info global for Ty with the caller-chosen
// linkage, visibility and DLL storage class. Builds the field list (vtable
// pointer, name, per-kind extra fields), then creates/replaces the global.
3804llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3805 QualType Ty,
3806 llvm::GlobalVariable::LinkageTypes Linkage,
3807 llvm::GlobalValue::VisibilityTypes Visibility,
3808 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3809 // Add the vtable pointer.
3810 BuildVTablePointer(cast<Type>(Ty));
3811
3812 // And the name.
3813 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3814 llvm::Constant *TypeNameField;
3815
3816 // If we're supposed to demote the visibility, be sure to set a flag
3817 // to use a string comparison for type_info comparisons.
3818 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3819 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3820 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3821 // The flag is the sign bit, which on ARM64 is defined to be clear
3822 // for global pointers. This is very ARM64-specific.
3823 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3824 llvm::Constant *flag =
3825 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3826 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3827 TypeNameField =
3828 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.GlobalsInt8PtrTy);
3829 } else {
3830 TypeNameField = TypeName;
3831 }
3832 Fields.push_back(TypeNameField);
3833
// Per-type-class extra fields, mirroring the vtable choice made in
// BuildVTablePointer.
3834 switch (Ty->getTypeClass()) {
3835#define TYPE(Class, Base)
3836#define ABSTRACT_TYPE(Class, Base)
3837#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3838#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3839#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3840#include "clang/AST/TypeNodes.inc"
3841 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3842
3843 // GCC treats vector types as fundamental types.
3844 case Type::Builtin:
3845 case Type::Vector:
3846 case Type::ExtVector:
3847 case Type::ConstantMatrix:
3848 case Type::Complex:
3849 case Type::BlockPointer:
3850 // Itanium C++ ABI 2.9.5p4:
3851 // abi::__fundamental_type_info adds no data members to std::type_info.
3852 break;
3853
3854 case Type::LValueReference:
3855 case Type::RValueReference:
3856 llvm_unreachable("References shouldn't get here");
3857
3858 case Type::Auto:
3859 case Type::DeducedTemplateSpecialization:
3860 llvm_unreachable("Undeduced type shouldn't get here");
3861
3862 case Type::Pipe:
3863 break;
3864
3865 case Type::BitInt:
3866 break;
3867
3868 case Type::ConstantArray:
3869 case Type::IncompleteArray:
3870 case Type::VariableArray:
3871 // Itanium C++ ABI 2.9.5p5:
3872 // abi::__array_type_info adds no data members to std::type_info.
3873 break;
3874
3875 case Type::FunctionNoProto:
3876 case Type::FunctionProto:
3877 // Itanium C++ ABI 2.9.5p5:
3878 // abi::__function_type_info adds no data members to std::type_info.
3879 break;
3880
3881 case Type::Enum:
3882 // Itanium C++ ABI 2.9.5p5:
3883 // abi::__enum_type_info adds no data members to std::type_info.
3884 break;
3885
3886 case Type::Record: {
3887 const CXXRecordDecl *RD =
3888 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3889 if (!RD->hasDefinition() || !RD->getNumBases()) {
3890 // We don't need to emit any fields.
3891 break;
3892 }
3893
// NOTE(review): the condition line choosing between the SI and VMI builders
// (upstream 3894) is elided in this listing.
3895 BuildSIClassTypeInfo(RD);
3896 else
3897 BuildVMIClassTypeInfo(RD);
3898
3899 break;
3900 }
3901
3902 case Type::ObjCObject:
3903 case Type::ObjCInterface:
3904 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3905 break;
3906
3907 case Type::ObjCObjectPointer:
3908 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3909 break;
3910
3911 case Type::Pointer:
3912 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3913 break;
3914
3915 case Type::MemberPointer:
3916 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3917 break;
3918
3919 case Type::Atomic:
3920 // No fields, at least for the moment.
3921 break;
3922 }
3923
3924 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3925
3926 SmallString<256> Name;
3927 llvm::raw_svector_ostream Out(Name);
3928 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3929 llvm::Module &M = CGM.getModule();
3930 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3931 llvm::GlobalVariable *GV =
3932 new llvm::GlobalVariable(M, Init->getType(),
3933 /*isConstant=*/true, Linkage, Init, Name);
3934
3935 // Export the typeinfo in the same circumstances as the vtable is exported.
3936 auto GVDLLStorageClass = DLLStorageClass;
3937 if (CGM.getTarget().hasPS4DLLImportExport()) {
3938 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3939 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3940 if (RD->hasAttr<DLLExportAttr>() ||
3941 CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
3942 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
3943 }
3944 }
3945 }
3946
3947 // If there's already an old global variable, replace it with the new one.
// Any prior declaration (e.g. from a forward reference) is redirected to the
// freshly emitted definition and then removed.
3948 if (OldGV) {
3949 GV->takeName(OldGV);
3950 OldGV->replaceAllUsesWith(GV);
3951 OldGV->eraseFromParent();
3952 }
3953
3954 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3955 GV->setComdat(M.getOrInsertComdat(GV->getName()));
3956
// NOTE(review): the computation of Align (upstream 3957-3958) is elided in
// this listing; only the setAlignment call below is visible.
3959 GV->setAlignment(Align.getAsAlign());
3960
3961 // The Itanium ABI specifies that type_info objects must be globally
3962 // unique, with one exception: if the type is an incomplete class
3963 // type or a (possibly indirect) pointer to one. That exception
3964 // affects the general case of comparing type_info objects produced
3965 // by the typeid operator, which is why the comparison operators on
3966 // std::type_info generally use the type_info name pointers instead
3967 // of the object addresses. However, the language's built-in uses
3968 // of RTTI generally require class types to be complete, even when
3969 // manipulating pointers to those class types. This allows the
3970 // implementation of dynamic_cast to rely on address equality tests,
3971 // which is much faster.
3972
3973 // All of this is to say that it's important that both the type_info
3974 // object and the type_info name be uniqued when weakly emitted.
3975
3976 TypeName->setVisibility(Visibility);
3977 CGM.setDSOLocal(TypeName);
3978
3979 GV->setVisibility(Visibility);
3980 CGM.setDSOLocal(GV);
3981
3982 TypeName->setDLLStorageClass(DLLStorageClass);
3983 GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
3984 ? GVDLLStorageClass
3985 : DLLStorageClass);
3986
3987 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3988 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3989
3990 return GV;
3991}
3992
3993/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3994/// for the given Objective-C object type.
3995void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3996 // Drop qualifiers.
3997 const Type *T = OT->getBaseType().getTypePtr();
3998 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3999
4000 // The builtin types are abi::__class_type_infos and don't require
4001 // extra fields.
4002 if (isa<BuiltinType>(T)) return;
4003
4004 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
4005 ObjCInterfaceDecl *Super = Class->getSuperClass();
4006
4007 // Root classes are also __class_type_info.
4008 if (!Super) return;
4009
4010 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
4011
4012 // Everything else is single inheritance.
4013 llvm::Constant *BaseTypeInfo =
4014 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
4015 Fields.push_back(BaseTypeInfo);
4016}
4017
4018/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
4019/// inheritance, according to the Itanium C++ ABI, 2.95p6b.
4020void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
4021 // Itanium C++ ABI 2.9.5p6b:
4022 // It adds to abi::__class_type_info a single member pointing to the
4023 // type_info structure for the base type,
4024 llvm::Constant *BaseTypeInfo =
4025 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
4026 Fields.push_back(BaseTypeInfo);
4027}
4028
4029namespace {
4030 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
4031 /// a class hierarchy.
4032 struct SeenBases {
// NOTE(review): the two member set declarations (upstream 4033-4034) are
// elided in this listing; ComputeVMIClassTypeInfoFlags below references them
// as NonVirtualBases and VirtualBases.
4035 };
4036}
4037
4038/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
4039/// abi::__vmi_class_type_info.
4040///
// NOTE(review): the first line of the signature (taking a base specifier
// pointer) is elided in this listing.
4042 SeenBases &Bases) {
4043
4044 unsigned Flags = 0;
4045
4046 auto *BaseDecl =
4047 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
4048
4049 if (Base->isVirtual()) {
4050 // Mark the virtual base as seen.
4051 if (!Bases.VirtualBases.insert(BaseDecl).second) {
4052 // If this virtual base has been seen before, then the class is diamond
4053 // shaped.
4054 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
4055 } else {
// First time as a virtual base — but if it also appears as a non-virtual
// base elsewhere, that is repeated (non-diamond) inheritance.
4056 if (Bases.NonVirtualBases.count(BaseDecl))
4057 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4058 }
4059 } else {
4060 // Mark the non-virtual base as seen.
4061 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
4062 // If this non-virtual base has been seen before, then the class has non-
4063 // diamond shaped repeated inheritance.
4064 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4065 } else {
4066 if (Bases.VirtualBases.count(BaseDecl))
4067 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4068 }
4069 }
4070
4071 // Walk all bases.
// Recurse into indirect bases: the flags describe both direct and indirect
// inheritance (see Itanium C++ ABI 2.9.5p6c).
4072 for (const auto &I : BaseDecl->bases())
4073 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4074
4075 return Flags;
4076}
4077
// Top-level driver: compute the __vmi_class_type_info flags for RD by walking
// its entire base-class hierarchy with a shared SeenBases accumulator.
// NOTE(review): the signature line (upstream 4078, taking the record decl RD)
// is elided in this listing.
4079 unsigned Flags = 0;
4080 SeenBases Bases;
4081
4082 // Walk all bases.
4083 for (const auto &I : RD->bases())
4084 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4085
4086 return Flags;
4087}
4088
4089/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4090/// classes with bases that do not satisfy the abi::__si_class_type_info
4091/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
4092void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4093 llvm::Type *UnsignedIntLTy =
// NOTE(review): the initializer for UnsignedIntLTy (upstream 4094) is elided
// in this listing.
4095
4096 // Itanium C++ ABI 2.9.5p6c:
4097 // __flags is a word with flags describing details about the class
4098 // structure, which may be referenced by using the __flags_masks
4099 // enumeration. These flags refer to both direct and indirect bases.
4100 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4101 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4102
4103 // Itanium C++ ABI 2.9.5p6c:
4104 // __base_count is a word with the number of direct proper base class
4105 // descriptions that follow.
4106 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4107
4108 if (!RD->getNumBases())
4109 return;
4110
4111 // Now add the base class descriptions.
4112
4113 // Itanium C++ ABI 2.9.5p6c:
4114 // __base_info[] is an array of base class descriptions -- one for every
4115 // direct proper base. Each description is of the type:
4116 //
4117 // struct abi::__base_class_type_info {
4118 // public:
4119 // const __class_type_info *__base_type;
4120 // long __offset_flags;
4121 //
4122 // enum __offset_flags_masks {
4123 // __virtual_mask = 0x1,
4124 // __public_mask = 0x2,
4125 // __offset_shift = 8
4126 // };
4127 // };
4128
4129 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4130 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4131 // LLP64 platforms.
4132 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4133 // LLP64 platforms.
4134 QualType OffsetFlagsTy = CGM.getContext().LongTy;
4135 const TargetInfo &TI = CGM.getContext().getTargetInfo();
4136 if (TI.getTriple().isOSCygMing() &&
// NOTE(review): the second operand of this condition (upstream 4137,
// comparing pointer and long widths per the comment above) is elided.
4138 OffsetFlagsTy = CGM.getContext().LongLongTy;
4139 llvm::Type *OffsetFlagsLTy =
4140 CGM.getTypes().ConvertType(OffsetFlagsTy);
4141
4142 for (const auto &Base : RD->bases()) {
4143 // The __base_type member points to the RTTI for the base type.
4144 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4145
4146 auto *BaseDecl =
4147 cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4148
4149 int64_t OffsetFlags = 0;
4150
4151 // All but the lower 8 bits of __offset_flags are a signed offset.
4152 // For a non-virtual base, this is the offset in the object of the base
4153 // subobject. For a virtual base, this is the offset in the virtual table of
4154 // the virtual base offset for the virtual base referenced (negative).
4155 CharUnits Offset;
4156 if (Base.isVirtual())
4157 Offset =
// NOTE(review): the virtual-base-offset computation (upstream 4158) is
// elided in this listing.
4159 else {
4160 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4161 Offset = Layout.getBaseClassOffset(BaseDecl);
4162 };
4163
4164 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4165
4166 // The low-order byte of __offset_flags contains flags, as given by the
4167 // masks from the enumeration __offset_flags_masks.
4168 if (Base.isVirtual())
4169 OffsetFlags |= BCTI_Virtual;
4170 if (Base.getAccessSpecifier() == AS_public)
4171 OffsetFlags |= BCTI_Public;
4172
4173 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4174 }
4175}
4176
4177/// Compute the flags for a __pbase_type_info, and remove the corresponding
4178/// pieces from \p Type.
// NOTE(review): the signature line (upstream 4179) is elided in this listing;
// the body reads and mutates Type (a QualType &) and, below, appears to use
// an ASTContext — confirm parameter names in the full file.
4180 unsigned Flags = 0;
4181
// Translate each top-level cv-qualifier into its PTI flag, then strip the
// qualifiers so the __pointee field refers to the unqualified type.
4182 if (Type.isConstQualified())
4183 Flags |= ItaniumRTTIBuilder::PTI_Const;
4184 if (Type.isVolatileQualified())
4185 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4186 if (Type.isRestrictQualified())
4187 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4188 Type = Type.getUnqualifiedType();
4189
4190 // Itanium C++ ABI 2.9.5p7:
4191 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4192 // incomplete class type, the incomplete target type flag is set.
// NOTE(review): the condition guarding this flag (upstream 4193) is elided.
4194 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4195
4196 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4197 if (Proto->isNothrow()) {
4198 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
// NOTE(review): the statement stripping the exception spec from Type
// (upstream 4199) is elided in this listing.
4200 }
4201 }
4202
4203 return Flags;
4204}
4205
4206/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4207/// used for pointer types.
4208void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4209 // Itanium C++ ABI 2.9.5p7:
4210 // __flags is a flag word describing the cv-qualification and other
4211 // attributes of the type pointed to
// extractPBaseFlags also strips the qualifiers from PointeeTy so the
// __pointee field below refers to the unqualified type.
4212 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4213
4214 llvm::Type *UnsignedIntLTy =
// NOTE(review): the initializer for UnsignedIntLTy (upstream 4215) is elided
// in this listing.
4216 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4217
4218 // Itanium C++ ABI 2.9.5p7:
4219 // __pointee is a pointer to the std::type_info derivation for the
4220 // unqualified type being pointed to.
4221 llvm::Constant *PointeeTypeInfo =
4222 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4223 Fields.push_back(PointeeTypeInfo);
4224}
4225
4226/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4227/// struct, used for member pointer types.
4228void
4229ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4230 QualType PointeeTy = Ty->getPointeeType();
4231
4232 // Itanium C++ ABI 2.9.5p7:
4233 // __flags is a flag word describing the cv-qualification and other
4234 // attributes of the type pointed to.
4235 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4236
// An incomplete containing class gets its own flag, in addition to any
// incomplete-pointee flag extractPBaseFlags may have set.
4237 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4238 if (IsIncompleteClassType(ClassType))
4239 Flags |= PTI_ContainingClassIncomplete;
4240
4241 llvm::Type *UnsignedIntLTy =
// NOTE(review): the initializer for UnsignedIntLTy (upstream 4242) is elided
// in this listing.
4243 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4244
4245 // Itanium C++ ABI 2.9.5p7:
4246 // __pointee is a pointer to the std::type_info derivation for the
4247 // unqualified type being pointed to.
4248 llvm::Constant *PointeeTypeInfo =
4249 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4250 Fields.push_back(PointeeTypeInfo);
4251
4252 // Itanium C++ ABI 2.9.5p9:
4253 // __context is a pointer to an abi::__class_type_info corresponding to the
4254 // class type containing the member pointed to
4255 // (e.g., the "A" in "int A::*").
4256 Fields.push_back(
4257 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4258}
4259
4260llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4261 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4262}
4263
4264void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4265 // Types added here must also be added to TypeInfoIsInStandardLibrary.
4266 QualType FundamentalTypes[] = {
4267 getContext().VoidTy, getContext().NullPtrTy,
4268 getContext().BoolTy, getContext().WCharTy,
4269 getContext().CharTy, getContext().UnsignedCharTy,
4270 getContext().SignedCharTy, getContext().ShortTy,
4271 getContext().UnsignedShortTy, getContext().IntTy,
4272 getContext().UnsignedIntTy, getContext().LongTy,
4273 getContext().UnsignedLongTy, getContext().LongLongTy,
4274 getContext().UnsignedLongLongTy, getContext().Int128Ty,
4275 getContext().UnsignedInt128Ty, getContext().HalfTy,
4276 getContext().FloatTy, getContext().DoubleTy,
4277 getContext().LongDoubleTy, getContext().Float128Ty,
4278 getContext().Char8Ty, getContext().Char16Ty,
4279 getContext().Char32Ty
4280 };
4281 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4282 RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
4283 ? llvm::GlobalValue::DLLExportStorageClass
4284 : llvm::GlobalValue::DefaultStorageClass;
4285 llvm::GlobalValue::VisibilityTypes Visibility =
4287 for (const QualType &FundamentalType : FundamentalTypes) {
4288 QualType PointerType = getContext().getPointerType(FundamentalType);
4289 QualType PointerTypeConst = getContext().getPointerType(
4290 FundamentalType.withConst());
4291 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4292 ItaniumRTTIBuilder(*this).BuildTypeInfo(
4293 Type, llvm::GlobalValue::ExternalLinkage,
4294 Visibility, DLLStorageClass);
4295 }
4296}
4297
4298/// What sort of uniqueness rules should we use for the RTTI for the
4299/// given type?
4300ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4301 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4302 if (shouldRTTIBeUnique())
4303 return RUK_Unique;
4304
4305 // It's only necessary for linkonce_odr or weak_odr linkage.
4306 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4307 Linkage != llvm::GlobalValue::WeakODRLinkage)
4308 return RUK_Unique;
4309
4310 // It's only necessary with default visibility.
4311 if (CanTy->getVisibility() != DefaultVisibility)
4312 return RUK_Unique;
4313
4314 // If we're not required to publish this symbol, hide it.
4315 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4316 return RUK_NonUniqueHidden;
4317
4318 // If we're required to publish this symbol, as we might be under an
4319 // explicit instantiation, leave it with default visibility but
4320 // enable string-comparisons.
4321 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4322 return RUK_NonUniqueVisible;
4323}
4324
// Find out how to codegen the complete destructor and constructor
namespace {
// Strategy for emitting a complete structor relative to its base variant:
// Emit it as a separate definition, RAUW references with the base symbol,
// emit it as an alias of the base symbol, or put both variants into one
// COMDAT group (only available on ELF/wasm).
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
4329static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4330 const CXXMethodDecl *MD) {
4331 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4332 return StructorCodegen::Emit;
4333
4334 // The complete and base structors are not equivalent if there are any virtual
4335 // bases, so emit separate functions.
4336 if (MD->getParent()->getNumVBases())
4337 return StructorCodegen::Emit;
4338
4340 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4342 } else {
4343 const auto *CD = cast<CXXConstructorDecl>(MD);
4345 }
4346 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4347
4348 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4349 return StructorCodegen::RAUW;
4350
4351 // FIXME: Should we allow available_externally aliases?
4352 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4353 return StructorCodegen::RAUW;
4354
4355 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4356 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4357 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4358 CGM.getTarget().getTriple().isOSBinFormatWasm())
4359 return StructorCodegen::COMDAT;
4360 return StructorCodegen::Emit;
4361 }
4362
4363 return StructorCodegen::Alias;
4364}
4365
4368 GlobalDecl TargetDecl) {
4369 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4370
4371 StringRef MangledName = CGM.getMangledName(AliasDecl);
4372 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
4373 if (Entry && !Entry->isDeclaration())
4374 return;
4375
4376 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4377
4378 // Create the alias with no name.
4379 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4380
4381 // Constructors and destructors are always unnamed_addr.
4382 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4383
4384 // Switch any previous uses to the alias.
4385 if (Entry) {
4386 assert(Entry->getType() == Aliasee->getType() &&
4387 "declaration exists with different type");
4388 Alias->takeName(Entry);
4389 Entry->replaceAllUsesWith(Alias);
4390 Entry->eraseFromParent();
4391 } else {
4392 Alias->setName(MangledName);
4393 }
4394
4395 // Finally, set up the alias with its proper name and attributes.
4396 CGM.SetCommonAttributes(AliasDecl, Alias);
4397}
4398
4399void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4400 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4401 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4402 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4403
4404 StructorCodegen CGType = getCodegenToUse(CGM, MD);
4405
4406 if (CD ? GD.getCtorType() == Ctor_Complete
4407 : GD.getDtorType() == Dtor_Complete) {
4408 GlobalDecl BaseDecl;
4409 if (CD)
4410 BaseDecl = GD.getWithCtorType(Ctor_Base);
4411 else
4412 BaseDecl = GD.getWithDtorType(Dtor_Base);
4413
4414 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4415 emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4416 return;
4417 }
4418
4419 if (CGType == StructorCodegen::RAUW) {
4420 StringRef MangledName = CGM.getMangledName(GD);
4421 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4422 CGM.addReplacement(MangledName, Aliasee);
4423 return;
4424 }
4425 }
4426
4427 // The base destructor is equivalent to the base destructor of its
4428 // base class if there is exactly one non-virtual base class with a
4429 // non-trivial destructor, there are no fields with a non-trivial
4430 // destructor, and the body of the destructor is trivial.
4431 if (DD && GD.getDtorType() == Dtor_Base &&
4432 CGType != StructorCodegen::COMDAT &&
4434 return;
4435
4436 // FIXME: The deleting destructor is equivalent to the selected operator
4437 // delete if:
4438 // * either the delete is a destroying operator delete or the destructor
4439 // would be trivial if it weren't virtual,
4440 // * the conversion from the 'this' parameter to the first parameter of the
4441 // destructor is equivalent to a bitcast,
4442 // * the destructor does not have an implicit "this" return, and
4443 // * the operator delete has the same calling convention and IR function type
4444 // as the destructor.
4445 // In such cases we should try to emit the deleting dtor as an alias to the
4446 // selected 'operator delete'.
4447
4448 llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4449
4450 if (CGType == StructorCodegen::COMDAT) {
4451 SmallString<256> Buffer;
4452 llvm::raw_svector_ostream Out(Buffer);
4453 if (DD)
4454 getMangleContext().mangleCXXDtorComdat(DD, Out);
4455 else
4456 getMangleContext().mangleCXXCtorComdat(CD, Out);
4457 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4458 Fn->setComdat(C);
4459 } else {
4460 CGM.maybeSetTrivialComdat(*MD, *Fn);
4461 }
4462}
4463
4464static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4465 // void *__cxa_begin_catch(void*);
4466 llvm::FunctionType *FTy = llvm::FunctionType::get(
4467 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4468
4469 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4470}
4471
4472static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4473 // void __cxa_end_catch();
4474 llvm::FunctionType *FTy =
4475 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4476
4477 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4478}
4479
4480static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4481 // void *__cxa_get_exception_ptr(void*);
4482 llvm::FunctionType *FTy = llvm::FunctionType::get(
4483 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4484
4485 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4486}
4487
4488namespace {
4489 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4490 /// exception type lets us state definitively that the thrown exception
4491 /// type does not have a destructor. In particular:
4492 /// - Catch-alls tell us nothing, so we have to conservatively
4493 /// assume that the thrown exception might have a destructor.
4494 /// - Catches by reference behave according to their base types.
4495 /// - Catches of non-record types will only trigger for exceptions
4496 /// of non-record types, which never have destructors.
4497 /// - Catches of record types can trigger for arbitrary subclasses
4498 /// of the caught type, so we have to assume the actual thrown
4499 /// exception type might have a throwing destructor, even if the
4500 /// caught type's destructor is trivial or nothrow.
4501 struct CallEndCatch final : EHScopeStack::Cleanup {
4502 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4503 bool MightThrow;
4504
4505 void Emit(CodeGenFunction &CGF, Flags flags) override {
4506 if (!MightThrow) {
4508 return;
4509 }
4510
4512 }
4513 };
4514}
4515
4516/// Emits a call to __cxa_begin_catch and enters a cleanup to call
4517/// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume
4518/// that the exception object's dtor is nothrow, therefore the __cxa_end_catch
4519/// call can be marked as nounwind even if EndMightThrow is true.
4520///
4521/// \param EndMightThrow - true if __cxa_end_catch might throw
4522static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4523 llvm::Value *Exn,
4524 bool EndMightThrow) {
4525 llvm::CallInst *call =
4527
4528 CGF.EHStack.pushCleanup<CallEndCatch>(
4530 EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor);
4531
4532 return call;
4533}
4534
4535/// A "special initializer" callback for initializing a catch
4536/// parameter during catch initialization.
4538 const VarDecl &CatchParam,
4539 Address ParamAddr,
4540 SourceLocation Loc) {
4541 // Load the exception from where the landing pad saved it.
4542 llvm::Value *Exn = CGF.getExceptionFromSlot();
4543
4544 CanQualType CatchType =
4545 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4546 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4547
4548 // If we're catching by reference, we can just cast the object
4549 // pointer to the appropriate pointer.
4550 if (isa<ReferenceType>(CatchType)) {
4551 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4552 bool EndCatchMightThrow = CaughtType->isRecordType();
4553
4554 // __cxa_begin_catch returns the adjusted object pointer.
4555 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4556
4557 // We have no way to tell the personality function that we're
4558 // catching by reference, so if we're catching a pointer,
4559 // __cxa_begin_catch will actually return that pointer by value.
4560 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4561 QualType PointeeType = PT->getPointeeType();
4562
4563 // When catching by reference, generally we should just ignore
4564 // this by-value pointer and use the exception object instead.
4565 if (!PointeeType->isRecordType()) {
4566
4567 // Exn points to the struct _Unwind_Exception header, which
4568 // we have to skip past in order to reach the exception data.
4569 unsigned HeaderSize =
4571 AdjustedExn =
4572 CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
4573
4574 // However, if we're catching a pointer-to-record type that won't
4575 // work, because the personality function might have adjusted
4576 // the pointer. There's actually no way for us to fully satisfy
4577 // the language/ABI contract here: we can't use Exn because it
4578 // might have the wrong adjustment, but we can't use the by-value
4579 // pointer because it's off by a level of abstraction.
4580 //
4581 // The current solution is to dump the adjusted pointer into an
4582 // alloca, which breaks language semantics (because changing the
4583 // pointer doesn't change the exception) but at least works.
4584 // The better solution would be to filter out non-exact matches
4585 // and rethrow them, but this is tricky because the rethrow
4586 // really needs to be catchable by other sites at this landing
4587 // pad. The best solution is to fix the personality function.
4588 } else {
4589 // Pull the pointer for the reference type off.
4590 llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);
4591
4592 // Create the temporary and write the adjusted pointer into it.
4593 Address ExnPtrTmp =
4594 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4595 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4596 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4597
4598 // Bind the reference to the temporary.
4599 AdjustedExn = ExnPtrTmp.getPointer();
4600 }
4601 }
4602
4603 llvm::Value *ExnCast =
4604 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4605 CGF.Builder.CreateStore(ExnCast, ParamAddr);
4606 return;
4607 }
4608
4609 // Scalars and complexes.
4610 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4611 if (TEK != TEK_Aggregate) {
4612 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4613
4614 // If the catch type is a pointer type, __cxa_begin_catch returns
4615 // the pointer by value.
4616 if (CatchType->hasPointerRepresentation()) {
4617 llvm::Value *CastExn =
4618 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4619
4620 switch (CatchType.getQualifiers().getObjCLifetime()) {
4622 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4623 [[fallthrough]];
4624
4628 CGF.Builder.CreateStore(CastExn, ParamAddr);
4629 return;
4630
4632 CGF.EmitARCInitWeak(ParamAddr, CastExn);
4633 return;
4634 }
4635 llvm_unreachable("bad ownership qualifier!");
4636 }
4637
4638 // Otherwise, it returns a pointer into the exception object.
4639
4640 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(AdjustedExn, CatchType);
4641 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4642 switch (TEK) {
4643 case TEK_Complex:
4644 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4645 /*init*/ true);
4646 return;
4647 case TEK_Scalar: {
4648 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4649 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4650 return;
4651 }
4652 case TEK_Aggregate:
4653 llvm_unreachable("evaluation kind filtered out!");
4654 }
4655 llvm_unreachable("bad evaluation kind");
4656 }
4657
4658 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4659 auto catchRD = CatchType->getAsCXXRecordDecl();
4660 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4661
4662 llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
4663
4664 // Check for a copy expression. If we don't have a copy expression,
4665 // that means a trivial copy is okay.
4666 const Expr *copyExpr = CatchParam.getInit();
4667 if (!copyExpr) {
4668 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4669 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4670 LLVMCatchTy, caughtExnAlignment);
4671 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4672 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4673 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4674 return;
4675 }
4676
4677 // We have to call __cxa_get_exception_ptr to get the adjusted
4678 // pointer before copying.
4679 llvm::CallInst *rawAdjustedExn =
4681
4682 // Cast that to the appropriate type.
4683 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4684 LLVMCatchTy, caughtExnAlignment);
4685
4686 // The copy expression is defined in terms of an OpaqueValueExpr.
4687 // Find it and map it to the adjusted expression.
4688 CodeGenFunction::OpaqueValueMapping
4689 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4690 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4691
4692 // Call the copy ctor in a terminate scope.
4693 CGF.EHStack.pushTerminate();
4694
4695 // Perform the copy construction.
4696 CGF.EmitAggExpr(copyExpr,
4697 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4702
4703 // Leave the terminate scope.
4704 CGF.EHStack.popTerminate();
4705
4706 // Undo the opaque value mapping.
4707 opaque.pop();
4708
4709 // Finally we can call __cxa_begin_catch.
4710 CallBeginCatch(CGF, Exn, true);
4711}
4712
4713/// Begins a catch statement by initializing the catch variable and
4714/// calling __cxa_begin_catch.
4715void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4716 const CXXCatchStmt *S) {
4717 // We have to be very careful with the ordering of cleanups here:
4718 // C++ [except.throw]p4:
4719 // The destruction [of the exception temporary] occurs
4720 // immediately after the destruction of the object declared in
4721 // the exception-declaration in the handler.
4722 //
4723 // So the precise ordering is:
4724 // 1. Construct catch variable.
4725 // 2. __cxa_begin_catch
4726 // 3. Enter __cxa_end_catch cleanup
4727 // 4. Enter dtor cleanup
4728 //
4729 // We do this by using a slightly abnormal initialization process.
4730 // Delegation sequence:
4731 // - ExitCXXTryStmt opens a RunCleanupsScope
4732 // - EmitAutoVarAlloca creates the variable and debug info
4733 // - InitCatchParam initializes the variable from the exception
4734 // - CallBeginCatch calls __cxa_begin_catch
4735 // - CallBeginCatch enters the __cxa_end_catch cleanup
4736 // - EmitAutoVarCleanups enters the variable destructor cleanup
4737 // - EmitCXXTryStmt emits the code for the catch body
4738 // - EmitCXXTryStmt close the RunCleanupsScope
4739
4740 VarDecl *CatchParam = S->getExceptionDecl();
4741 if (!CatchParam) {
4742 llvm::Value *Exn = CGF.getExceptionFromSlot();
4743 CallBeginCatch(CGF, Exn, true);
4744 return;
4745 }
4746
4747 // Emit the local.
4748 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4749 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4750 CGF.EmitAutoVarCleanups(var);
4751}
4752
4753/// Get or define the following function:
4754/// void @__clang_call_terminate(i8* %exn) nounwind noreturn
4755/// This code is used only in C++.
4756static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4757 ASTContext &C = CGM.getContext();
4759 C.VoidTy, {C.getPointerType(C.CharTy)});
4760 llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI);
4761 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4762 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4763 llvm::Function *fn =
4764 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4765 if (fn->empty()) {
4766 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false);
4768 fn->setDoesNotThrow();
4769 fn->setDoesNotReturn();
4770
4771 // What we really want is to massively penalize inlining without
4772 // forbidding it completely. The difference between that and
4773 // 'noinline' is negligible.
4774 fn->addFnAttr(llvm::Attribute::NoInline);
4775
4776 // Allow this function to be shared across translation units, but
4777 // we don't want it to turn into an exported symbol.
4778 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4779 fn->setVisibility(llvm::Function::HiddenVisibility);
4780 if (CGM.supportsCOMDAT())
4781 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4782
4783 // Set up the function.
4784 llvm::BasicBlock *entry =
4785 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4786 CGBuilderTy builder(CGM, entry);
4787
4788 // Pull the exception pointer out of the parameter list.
4789 llvm::Value *exn = &*fn->arg_begin();
4790
4791 // Call __cxa_begin_catch(exn).
4792 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4793 catchCall->setDoesNotThrow();
4794 catchCall->setCallingConv(CGM.getRuntimeCC());
4795
4796 // Call std::terminate().
4797 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4798 termCall->setDoesNotThrow();
4799 termCall->setDoesNotReturn();
4800 termCall->setCallingConv(CGM.getRuntimeCC());
4801
4802 // std::terminate cannot return.
4803 builder.CreateUnreachable();
4804 }
4805 return fnRef;
4806}
4807
4808llvm::CallInst *
4809ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4810 llvm::Value *Exn) {
4811 // In C++, we want to call __cxa_begin_catch() before terminating.
4812 if (Exn) {
4813 assert(CGF.CGM.getLangOpts().CPlusPlus);
4815 }
4816 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4817}
4818
4819std::pair<llvm::Value *, const CXXRecordDecl *>
4820ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4821 const CXXRecordDecl *RD) {
4822 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4823}
4824
4825void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4826 const CXXCatchStmt *C) {
4827 if (CGF.getTarget().hasFeature("exception-handling"))
4828 CGF.EHStack.pushCleanup<CatchRetScope>(
4829 NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4830 ItaniumCXXABI::emitBeginCatch(CGF, C);
4831}
4832
4833llvm::CallInst *
4834WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4835 llvm::Value *Exn) {
4836 // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
4837 // the violating exception to mark it handled, but it is currently hard to do
4838 // with wasm EH instruction structure with catch/catch_all, we just call
4839 // std::terminate and ignore the violating exception as in CGCXXABI.
4840 // TODO Consider code transformation that makes calling __clang_call_terminate
4841 // possible.
4843}
4844
4845/// Register a global destructor as best as we know how.
4846void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4847 llvm::FunctionCallee Dtor,
4848 llvm::Constant *Addr) {
4849 if (D.getTLSKind() != VarDecl::TLS_None) {
4850 llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
4851
4852 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4853 llvm::FunctionType *AtExitTy =
4854 llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, PtrTy}, true);
4855
4856 // Fetch the actual function.
4857 llvm::FunctionCallee AtExit =
4858 CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4859
4860 // Create __dtor function for the var decl.
4861 llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4862
4863 // Register above __dtor with atexit().
4864 // First param is flags and must be 0, second param is function ptr
4865 llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4866 CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4867
4868 // Cannot unregister TLS __dtor so done
4869 return;
4870 }
4871
4872 // Create __dtor function for the var decl.
4873 llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4874
4875 // Register above __dtor with atexit().
4876 CGF.registerGlobalDtorWithAtExit(DtorStub);
4877
4878 // Emit __finalize function to unregister __dtor and (as appropriate) call
4879 // __dtor.
4880 emitCXXStermFinalizer(D, DtorStub, Addr);
4881}
4882
4883void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4884 llvm::Constant *addr) {
4885 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4886 SmallString<256> FnName;
4887 {
4888 llvm::raw_svector_ostream Out(FnName);
4889 getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4890 }
4891
4892 // Create the finalization action associated with a variable.
4894 llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4895 FTy, FnName.str(), FI, D.getLocation());
4896
4897 CodeGenFunction CGF(CGM);
4898
4899 CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4901 D.getInit()->getExprLoc());
4902
4903 // The unatexit subroutine unregisters __dtor functions that were previously
4904 // registered by the atexit subroutine. If the referenced function is found,
4905 // the unatexit returns a value of 0, meaning that the cleanup is still
4906 // pending (and we should call the __dtor function).
4907 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
4908
4909 llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4910
4911 llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4912 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4913
4914 // Check if unatexit returns a value of 0. If it does, jump to
4915 // DestructCallBlock, otherwise jump to EndBlock directly.
4916 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4917
4918 CGF.EmitBlock(DestructCallBlock);
4919
4920 // Emit the call to dtorStub.
4921 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4922
4923 // Make sure the call and the callee agree on calling convention.
4924 CI->setCallingConv(dtorStub->getCallingConv());
4925
4926 CGF.EmitBlock(EndBlock);
4927
4928 CGF.FinishFunction();
4929
4930 if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
4931 CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
4932 IPA->getPriority());
4934 getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
4935 // According to C++ [basic.start.init]p2, class template static data
4936 // members (i.e., implicitly or explicitly instantiated specializations)
4937 // have unordered initialization. As a consequence, we can put them into
4938 // their own llvm.global_dtors entry.
4939 CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
4940 } else {
4941 CGM.AddCXXStermFinalizerEntry(StermFinalizer);
4942 }
4943}
#define V(N, I)
Definition: ASTContext.h:3259
static StructorCodegen getCodegenToUse(CodeGenModule &CGM, const CXXMethodDecl *MD)
static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF)
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM)
Get or define the following function: void @__clang_call_terminate(i8* exn) nounwind noreturn This co...
static llvm::Value * performTypeAdjustment(CodeGenFunction &CGF, Address InitialPtr, int64_t NonVirtualAdjustment, int64_t VirtualAdjustment, bool IsReturnAdjustment)
static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type)
Compute the flags for a __pbase_type_info, and remove the corresponding pieces from Type.
static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty)
ShouldUseExternalRTTIDescriptor - Returns whether the type information for the given type exists some...
static bool IsIncompleteClassType(const RecordType *RecordTy)
IsIncompleteClassType - Returns whether the given record type is incomplete.
static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base, SeenBases &Bases)
ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in abi::__vmi_class_type_info.
static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF)
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF, llvm::FunctionCallee dtor, llvm::Constant *addr, bool TLS)
Register a global destructor using __cxa_atexit.
static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF)
static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM)
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty)
Return the linkage that the type info and type info name constants should have for the given type.
static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static llvm::Function * createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM, StringRef FnName)
static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM)
static bool IsStandardLibraryRTTIDescriptor(QualType Ty)
IsStandardLibraryRTTIDescriptor - Returns whether the type information for the given type exists in t...
static llvm::Value * CallBeginCatch(CodeGenFunction &CGF, llvm::Value *Exn, bool EndMightThrow)
Emits a call to __cxa_begin_catch and enters a cleanup to call __cxa_end_catch.
static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static CharUnits computeOffsetHint(ASTContext &Context, const CXXRecordDecl *Src, const CXXRecordDecl *Dst)
Compute the src2dst_offset hint as described in the Itanium C++ ABI [2.9.7].
static bool isThreadWrapperReplaceable(const VarDecl *VD, CodeGen::CodeGenModule &CGM)
static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD)
static void InitCatchParam(CodeGenFunction &CGF, const VarDecl &CatchParam, Address ParamAddr, SourceLocation Loc)
A "special initializer" callback for initializing a catch parameter during catch initialization.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty)
TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type info for that type is de...
static bool CanUseSingleInheritance(const CXXRecordDecl *RD)
static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM)
static llvm::GlobalValue::LinkageTypes getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM)
Get the appropriate linkage for the wrapper function.
static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM)
static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static bool ContainsIncompleteClassType(QualType Ty)
ContainsIncompleteClassType - Returns whether the given type contains an incomplete class type.
static void emitConstructorDestructorAlias(CodeGenModule &CGM, GlobalDecl AliasDecl, GlobalDecl TargetDecl)
static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM)
static void dtorTy(Block *, std::byte *Ptr, const Descriptor *)
Definition: Descriptor.cpp:28
int Priority
Definition: Format.cpp:2984
static uint64_t getFieldOffset(const ASTContext &C, const FieldDecl *FD)
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D)
Determine what kind of template specialization the given declaration is.
#define CXXABI(Name, Str)
Definition: TargetCXXABI.h:32
C Language Family Type Representation.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
const ValueDecl * getMemberPointerDecl() const
Definition: APValue.cpp:1053
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
CanQualType LongTy
Definition: ASTContext.h:1095
QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl, ObjCInterfaceDecl *PrevDecl=nullptr) const
getObjCInterfaceType - Return the unique reference to the type for the specified ObjC interface decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2549
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType VoidPtrTy
Definition: ASTContext.h:1113
IdentifierTable & Idents
Definition: ASTContext.h:639
const LangOptions & getLangOpts() const
Definition: ASTContext.h:770
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const
Get a function type and produce the equivalent function type with the specified exception specificati...
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType CharTy
Definition: ASTContext.h:1088
CanQualType IntTy
Definition: ASTContext.h:1095
CharUnits getExnObjectAlignment() const
Return the alignment (in bytes) of the thrown exception object.
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getPreferredTypeAlignInChars(QualType T) const
Return the PreferredAlignment of a (complete) type T, in characters.
Definition: ASTContext.h:2374
CanQualType VoidTy
Definition: ASTContext.h:1086
CanQualType UnsignedIntTy
Definition: ASTContext.h:1096
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:752
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
TargetCXXABI::Kind getCXXABIKind() const
Return the C++ ABI kind that should be used.
Definition: ASTContext.cpp:818
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
CanQualType LongLongTy
Definition: ASTContext.h:1095
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:249
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:259
This class is used for builtin types like 'int'.
Definition: Type.h:2739
Kind getKind() const
Definition: Type.h:2781
Implements C++ ABI-specific semantic analysis functions.
Definition: CXXABI.h:29
Represents a path from a specific derived class (which is not represented as part of the path) to a p...
BasePaths - Represents the set of paths from a derived class to one of its (direct or indirect) bases...
Represents a base class of a C++ class.
Definition: DeclCXX.h:146
QualType getType() const
Retrieves the type of the base class.
Definition: DeclCXX.h:249
CXXCatchStmt - This represents a C++ catch block.
Definition: StmtCXX.h:28
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2528
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition: ExprCXX.h:2481
FunctionDecl * getOperatorDelete() const
Definition: ExprCXX.h:2520
bool isGlobalDelete() const
Definition: ExprCXX.h:2506
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2792
Represents a call to a member function that may be written either with member call syntax (e....
Definition: ExprCXX.h:176
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2053
bool isVirtual() const
Definition: DeclCXX.h:2108
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition: DeclCXX.h:2179
bool isInstance() const
Definition: DeclCXX.h:2080
CXXMethodDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition: DeclCXX.h:2149
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition: ExprCXX.h:2224
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
base_class_range bases()
Definition: DeclCXX.h:618
unsigned getNumBases() const
Retrieves the number of base classes of this class.
Definition: DeclCXX.h:612
base_class_iterator bases_begin()
Definition: DeclCXX.h:625
base_class_range vbases()
Definition: DeclCXX.h:635
bool isAbstract() const
Determine whether this class has a pure virtual function.
Definition: DeclCXX.h:1221
bool isDynamicClass() const
Definition: DeclCXX.h:584
bool hasDefinition() const
Definition: DeclCXX.h:571
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
Definition: DeclCXX.h:633
bool isDerivedFrom(const CXXRecordDecl *Base) const
Determine whether this class is derived from the class Base.
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1192
const Expr * getSubExpr() const
Definition: ExprCXX.h:1212
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
Qualifiers getQualifiers() const
Retrieve all qualifiers.
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3478
CastKind getCastKind() const
Definition: Expr.h:3522
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
std::string SymbolPartition
The name of the partition that symbols are assigned to, specified with -fsymbol-partition (see https:...
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
An aligned address.
Definition: Address.h:29
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:78
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:62
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:100
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:68
llvm::Value * getPointer() const
Definition: Address.h:51
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:57
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:595
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:863
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:97
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:259
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:71
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:89
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
Definition: CGBuilder.h:213
Address CreateGEP(Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:246
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
virtual bool shouldEmitExactDynamicCast(QualType DestRecordTy)=0
virtual void EmitCXXConstructors(const CXXConstructorDecl *D)=0
Emit constructor variants required by this ABI.
virtual llvm::Constant * getAddrOfRTTIDescriptor(QualType Ty)=0
virtual llvm::Value * performReturnAdjustment(CodeGenFunction &CGF, Address Ret, const ReturnAdjustment &RA)=0
virtual llvm::Value * getVTableAddressPointInStructor(CodeGenFunction &CGF, const CXXRecordDecl *RD, BaseSubobject Base, const CXXRecordDecl *NearestVBase)=0
Get the address point of the vtable for the given base subobject while building a constructor or a de...
virtual void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C)=0
virtual void emitRethrow(CodeGenFunction &CGF, bool isNoReturn)=0
virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, FunctionArgList &Args) const =0
virtual bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr)=0
Checks if ABI requires extra virtual offset for vtable field.
virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit)=0
Emits the guarded initializer and destructor setup for the given variable, given that it couldn't be ...
virtual void EmitCXXDestructors(const CXXDestructorDecl *D)=0
Emit destructor variants required by this ABI.
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF)=0
Emit the ABI-specific prolog for the function.
virtual bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, CXXDtorType DT) const =0
Returns true if the given destructor type should be emitted as a linkonce delegating thunk,...
virtual bool NeedsVTTParameter(GlobalDecl GD)
Return whether the given global decl needs a VTT parameter.
Definition: CGCXXABI.cpp:318
virtual llvm::CallInst * emitTerminateForUnexpectedException(CodeGenFunction &CGF, llvm::Value *Exn)
Definition: CGCXXABI.cpp:323
@ RAA_Default
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Definition: CGCXXABI.h:157
@ RAA_Indirect
Pass it as a pointer to temporary memory.
Definition: CGCXXABI.h:165
virtual llvm::Type * ConvertMemberPointerType(const MemberPointerType *MPT)
Find the LLVM type used to represent the given member pointer type.
Definition: CGCXXABI.cpp:37
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition: CGCXXABI.cpp:95
virtual StringRef GetPureVirtualCallName()=0
Gets the pure virtual member call function.
virtual CharUnits getArrayCookieSizeImpl(QualType elementType)
Returns the extra size required in order to store the array cookie for the given type.
Definition: CGCXXABI.cpp:211
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const =0
Determine whether it's possible to emit a vtable for RD, even though we do not know that the vtable h...
virtual StringRef GetDeletedVirtualCallName()=0
Gets the deleted virtual member call name.
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:87
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
bool isEmittedWithConstantInitializer(const VarDecl *VD, bool InspectInitForWeakDef=false) const
Determine whether we will definitely emit this variable with a constant initializer,...
Definition: CGCXXABI.cpp:166
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition: CGCXXABI.cpp:77
virtual llvm::Constant * EmitMemberPointer(const APValue &MP, QualType MPT)
Create a member pointer for the given member pointer constant.
Definition: CGCXXABI.cpp:109
virtual llvm::Constant * getVTableAddressPoint(BaseSubobject Base, const CXXRecordDecl *VTableClass)=0
Get the address point of the vtable for the given base subobject.
virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params)=0
Insert any ABI-specific implicit parameters into the parameter list for a function.
virtual llvm::Value * readArrayCookieImpl(CodeGenFunction &IGF, Address ptr, CharUnits cookieSize)
Reads the array cookie for an allocation which is known to have one.
Definition: CGCXXABI.cpp:267
virtual llvm::Value * EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr, const MemberPointerType *MPT)
Calculate an l-value from an object and a data member pointer.
Definition: CGCXXABI.cpp:55
virtual llvm::Value * getCXXDestructorImplicitParam(CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating)=0
Get the implicit (second) parameter that comes after the "this" pointer, or nullptr if there isn't...
virtual std::pair< llvm::Value *, const CXXRecordDecl * > LoadVTablePtr(CodeGenFunction &CGF, Address This, const CXXRecordDecl *RD)=0
Load a vtable from This, an object of polymorphic type RD, or from one of its virtual bases if it doe...
virtual llvm::Constant * getVTableAddressPointForConstExpr(BaseSubobject Base, const CXXRecordDecl *VTableClass)=0
Get the address point of the vtable for the given base subobject while building a constexpr.
virtual void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD, bool ReturnAdjustment)=0
virtual llvm::Value * EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType, Address This, DeleteOrMemberCallExpr E)=0
Emit the ABI-specific virtual destructor call.
bool mayNeedDestruction(const VarDecl *VD) const
Definition: CGCXXABI.cpp:153
virtual bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass)=0
Checks if ABI requires to initialize vptrs for given dynamic class.
virtual void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E)=0
virtual llvm::Value * GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This, const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl)=0
virtual bool isThisCompleteObject(GlobalDecl GD) const =0
Determine whether there's something special about the rules of the ABI that tells us that 'this' is a compl...
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual bool classifyReturnType(CGFunctionInfo &FI) const =0
If the C++ ABI requires the given type be returned in a particular way, this method sets RetAI and re...
virtual void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE, Address Ptr, QualType ElementType, const CXXDestructorDecl *Dtor)=0
virtual CatchTypeInfo getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType)=0
virtual void EmitThreadLocalInitFuncs(CodeGenModule &CGM, ArrayRef< const VarDecl * > CXXThreadLocals, ArrayRef< llvm::Function * > CXXThreadLocalInits, ArrayRef< const VarDecl * > CXXThreadLocalInitVars)=0
Emits ABI-required functions necessary to initialize thread_local variables in this translation unit.
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual llvm::Value * emitExactDynamicCast(CodeGenFunction &CGF, Address Value, QualType SrcRecordTy, QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess, llvm::BasicBlock *CastFail)=0
Emit a dynamic_cast from SrcRecordTy to DestRecordTy.
virtual void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)=0
Emit the destructor call.
virtual llvm::GlobalVariable * getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset)=0
Get the address of the vtable for the given record decl which should be used for the vptr at the give...
virtual bool EmitBadCastCall(CodeGenFunction &CGF)=0
virtual llvm::Constant * EmitMemberDataPointer(const MemberPointerType *MPT, CharUnits offset)
Create a member pointer for the given field.
Definition: CGCXXABI.cpp:104
virtual llvm::Value * EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy, Address ThisPtr, llvm::Type *StdTypeInfoPtrTy)=0
virtual void emitVirtualInheritanceTables(const CXXRecordDecl *RD)=0
Emit any tables needed to implement virtual inheritance.
virtual void emitVTableDefinitions(CodeGenVTables &CGVT, const CXXRecordDecl *RD)=0
Emits the VTable definitions required for the given record type.
virtual CGCallee EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, const Expr *E, Address This, llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr, const MemberPointerType *MPT)
Load a member function from an object and a member function pointer.
Definition: CGCXXABI.cpp:41
virtual void emitCXXStructor(GlobalDecl GD)=0
Emit a single constructor/destructor with the given type from a C++ constructor Decl.
virtual bool exportThunk(