ItaniumCXXABI.cpp — source listing extracted from the clang 20.0.0git doxygen documentation ("Go to the documentation of this file").
1//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides C++ code generation targeting the Itanium C++ ABI. The class
10// in this file generates structures that follow the Itanium C++ ABI, which is
11// documented at:
12// https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14//
15// It also supports the closely-related ARM ABI, documented at:
16// https://developer.arm.com/documentation/ihi0041/g/
17//
18//===----------------------------------------------------------------------===//
19
20#include "CGCXXABI.h"
21#include "CGCleanup.h"
22#include "CGRecordLayout.h"
23#include "CGVTables.h"
24#include "CodeGenFunction.h"
25#include "CodeGenModule.h"
26#include "ConstantEmitter.h"
27#include "TargetInfo.h"
28#include "clang/AST/Attr.h"
29#include "clang/AST/Mangle.h"
30#include "clang/AST/StmtCXX.h"
31#include "clang/AST/Type.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/GlobalValue.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/Intrinsics.h"
37#include "llvm/IR/Value.h"
38#include "llvm/Support/ScopedPrinter.h"
39
40#include <optional>
41
42using namespace clang;
43using namespace CodeGen;
44
45namespace {
// NOTE(review): this listing is a doxygen text dump. The digits fused onto the
// start of each line are doxygen line numbers, and several declaration lines
// were dropped by the extraction (visible as gaps in that numbering). Diff
// against upstream clang/lib/CodeGen/ItaniumCXXABI.cpp before relying on any
// declaration below whose first line is missing.
//
// Per the file header, this class generates code following the Itanium C++
// ABI (and, via the UseARM* flags, the closely-related ARM variant).
46class ItaniumCXXABI : public CodeGen::CGCXXABI {
47 /// VTables - All the vtables which have been defined.
48 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
49
50 /// All the thread wrapper functions that have been used.
52 ThreadWrappers;
// NOTE(review): the map type for ThreadWrappers (doxygen line 51) was lost in
// extraction — confirm the declaration against upstream.
53
54protected:
// Flag set by the ARM-flavored subclasses/factory: selects the ARM member
// pointer encoding (virtual bit in 'adj' instead of 'ptr').
55 bool UseARMMethodPtrABI;
// Selects the ARM guard-variable protocol for static local initialization.
56 bool UseARMGuardVarABI;
// AppleARM64 only: virtual member-function offsets use the low 32 bits.
57 bool Use32BitVTableOffsetABI;
58
// NOTE(review): the signature line for the mangle-context accessor (doxygen
// line 59) was lost in extraction.
60 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
61 }
62
63public:
64 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
65 bool UseARMMethodPtrABI = false,
66 bool UseARMGuardVarABI = false) :
67 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
68 UseARMGuardVarABI(UseARMGuardVarABI),
69 Use32BitVTableOffsetABI(false) { }
70
71 bool classifyReturnType(CGFunctionInfo &FI) const override;
72
73 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
74 // If C++ prohibits us from making a copy, pass by address.
75 if (!RD->canPassInRegisters())
76 return RAA_Indirect;
77 return RAA_Default;
78 }
79
80 bool isThisCompleteObject(GlobalDecl GD) const override {
81 // The Itanium ABI has separate complete-object vs. base-object
82 // variants of both constructors and destructors.
83 if (isa<CXXDestructorDecl>(GD.getDecl())) {
84 switch (GD.getDtorType()) {
85 case Dtor_Complete:
86 case Dtor_Deleting:
87 return true;
88
89 case Dtor_Base:
90 return false;
91
92 case Dtor_Comdat:
93 llvm_unreachable("emitting dtor comdat as function?");
94 }
95 llvm_unreachable("bad dtor kind");
96 }
97 if (isa<CXXConstructorDecl>(GD.getDecl())) {
98 switch (GD.getCtorType()) {
99 case Ctor_Complete:
100 return true;
101
102 case Ctor_Base:
103 return false;
104
// NOTE(review): the closure-constructor case labels (doxygen lines 104-106)
// appear to have been dropped by the extraction.
107 llvm_unreachable("closure ctors in Itanium ABI?");
108
109 case Ctor_Comdat:
110 llvm_unreachable("emitting ctor comdat as function?");
111 }
// NOTE(review): this is the *ctor* switch — the "bad dtor kind" message below
// looks like a copy/paste from the dtor switch above; confirm upstream before
// changing the string.
112 llvm_unreachable("bad dtor kind");
113 }
114
115 // No other kinds.
116 return false;
117 }
118
119 bool isZeroInitializable(const MemberPointerType *MPT) override;
120
121 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
122
// NOTE(review): the first line(s) of this declaration (doxygen 123-124,
// EmitLoadOfMemberFunctionPointer per the definition later in this file)
// were dropped by the extraction.
125 const Expr *E,
126 Address This,
127 llvm::Value *&ThisPtrForCall,
128 llvm::Value *MemFnPtr,
129 const MemberPointerType *MPT) override;
130
131 llvm::Value *
// NOTE(review): doxygen lines 132-133 (the EmitMemberDataPointerAddress
// parameter list head, per the definition later in this file) are missing.
134 llvm::Value *MemPtr,
135 const MemberPointerType *MPT) override;
136
// NOTE(review): doxygen line 137 (first line of the CodeGenFunction overload
// of EmitMemberPointerConversion) is missing.
138 const CastExpr *E,
139 llvm::Value *Src) override;
140 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
141 llvm::Constant *Src) override;
142
143 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
144
145 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
146 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
147 CharUnits offset) override;
148 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
149 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
// NOTE(review): doxygen line 150 (BuildMemberPointer's trailing parameter)
// is missing.
151
// NOTE(review): doxygen line 152 (start of a member-pointer comparison
// declaration) is missing.
153 llvm::Value *L, llvm::Value *R,
154 const MemberPointerType *MPT,
155 bool Inequality) override;
156
// NOTE(review): doxygen line 157 (start of a member-pointer null-test
// declaration) is missing.
158 llvm::Value *Addr,
159 const MemberPointerType *MPT) override;
160
// NOTE(review): doxygen line 161 (start of a virtual-object-delete
// declaration) is missing.
162 Address Ptr, QualType ElementType,
163 const CXXDestructorDecl *Dtor) override;
164
165 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
166 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
167
168 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
169
170 llvm::CallInst *
// NOTE(review): doxygen line 171 (the function name line,
// emitTerminateForUnexpectedException per the WebAssembly override below)
// is missing.
172 llvm::Value *Exn) override;
173
174 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
175 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
// NOTE(review): doxygen lines 176-177 and 179 (a catch-handler-type helper's
// signature and return statement) are missing.
178 QualType CatchHandlerType) override {
180 }
181
182 bool shouldTypeidBeNullChecked(QualType SrcRecordTy) override;
183 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
184 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
185 Address ThisPtr,
186 llvm::Type *StdTypeInfoPtrTy) override;
187
188 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
189 QualType SrcRecordTy) override;
190
191 /// Determine whether we know that all instances of type RecordTy will have
192 /// the same vtable pointer values, that is distinct from all other vtable
193 /// pointers. While this is required by the Itanium ABI, it doesn't happen in
194 /// practice in some cases due to language extensions.
195 bool hasUniqueVTablePointer(QualType RecordTy) {
196 const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();
197
198 // Under -fapple-kext, multiple definitions of the same vtable may be
199 // emitted.
200 if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
201 getContext().getLangOpts().AppleKext)
202 return false;
203
204 // If the type_info* would be null, the vtable might be merged with that of
205 // another type.
206 if (!CGM.shouldEmitRTTI())
207 return false;
208
209 // If there's only one definition of the vtable in the program, it has a
210 // unique address.
211 if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD)))
212 return true;
213
214 // Even if there are multiple definitions of the vtable, they are required
215 // by the ABI to use the same symbol name, so should be merged at load
216 // time. However, if the class has hidden visibility, there can be
217 // different versions of the class in different modules, and the ABI
218 // library might treat them as being the same.
219 if (CGM.GetLLVMVisibility(RD->getVisibility()) !=
220 llvm::GlobalValue::DefaultVisibility)
221 return false;
222
223 return true;
224 }
225
226 bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
227 return hasUniqueVTablePointer(DestRecordTy);
228 }
229
// NOTE(review): doxygen line 230 (start of a dynamic-cast emission
// declaration) is missing.
231 QualType SrcRecordTy, QualType DestTy,
232 QualType DestRecordTy,
233 llvm::BasicBlock *CastEnd) override;
234
235 llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
236 QualType SrcRecordTy, QualType DestTy,
237 QualType DestRecordTy,
238 llvm::BasicBlock *CastSuccess,
239 llvm::BasicBlock *CastFail) override;
240
// NOTE(review): doxygen line 241 (start of a dynamic-cast-to-void
// declaration) is missing.
242 QualType SrcRecordTy) override;
243
244 bool EmitBadCastCall(CodeGenFunction &CGF) override;
245
246 llvm::Value *
// NOTE(review): doxygen line 247 (the virtual-base-offset function name line)
// is missing.
248 const CXXRecordDecl *ClassDecl,
249 const CXXRecordDecl *BaseClassDecl) override;
250
251 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
252
253 AddedStructorArgCounts
// NOTE(review): doxygen line 254 (the structor-signature function name line)
// is missing.
255 SmallVectorImpl<CanQualType> &ArgTys) override;
256
// NOTE(review): doxygen line 257 (start of the thunk-for-dtor-variant
// predicate) is missing.
258 CXXDtorType DT) const override {
259 // Itanium does not emit any destructor variant as an inline thunk.
260 // Delegating may occur as an optimization, but all variants are either
261 // emitted with external linkage or as linkonce if they are inline and used.
262 return false;
263 }
264
265 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
266
// NOTE(review): doxygen line 267 (start of an implicit-structor-params
// declaration) is missing.
268 FunctionArgList &Params) override;
269
// NOTE(review): doxygen line 270 is missing.
271
272 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
273 const CXXConstructorDecl *D,
// NOTE(review): doxygen line 274 (a parameter of this declaration) is
// missing.
275 bool ForVirtualBase,
276 bool Delegating) override;
277
// NOTE(review): doxygen line 278 (start of a destructor implicit-param
// declaration; line 280 below is also missing) is missing.
279 const CXXDestructorDecl *DD,
281 bool ForVirtualBase,
282 bool Delegating) override;
283
// NOTE(review): doxygen line 284 (start of a destructor-call emission
// declaration) is missing.
285 CXXDtorType Type, bool ForVirtualBase,
286 bool Delegating, Address This,
287 QualType ThisTy) override;
288
// NOTE(review): doxygen line 289 (start of a vtable-definitions emission
// declaration) is missing.
290 const CXXRecordDecl *RD) override;
291
// NOTE(review): doxygen line 292 (start of a vptr-related declaration) is
// missing.
293 CodeGenFunction::VPtr Vptr) override;
294
295 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
296 return true;
297 }
298
299 llvm::Constant *
// NOTE(review): doxygen line 300 (the vtable-address-point function name
// line) is missing.
301 const CXXRecordDecl *VTableClass) override;
302
// NOTE(review): doxygen line 303 (start of the in-structor vtable address
// point declaration) is missing.
304 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
305 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
306
307 llvm::Value *getVTableAddressPointInStructorWithVTT(
308 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
309 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
310
311 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
312 CharUnits VPtrOffset) override;
313
// NOTE(review): doxygen line 314 (start of a virtual-function-pointer
// declaration) is missing.
315 Address This, llvm::Type *Ty,
316 SourceLocation Loc) override;
317
// NOTE(review): doxygen line 318 (start of a virtual-destructor-call
// declaration) is missing.
319 const CXXDestructorDecl *Dtor,
320 CXXDtorType DtorType, Address This,
321 DeleteOrMemberCallExpr E) override;
322
323 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
324
325 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
326 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
327
328 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
329 bool ReturnAdjustment) override {
330 // Allow inlining of thunks by emitting them with available_externally
331 // linkage together with vtables when needed.
332 if (ForVTable && !Thunk->hasLocalLinkage())
333 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
334 CGM.setGVProperties(Thunk, GD);
335 }
336
337 bool exportThunk() override { return true; }
338
339 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
340 const CXXRecordDecl *UnadjustedThisClass,
341 const ThunkInfo &TI) override;
342
343 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
344 const CXXRecordDecl *UnadjustedRetClass,
345 const ReturnAdjustment &RA) override;
346
// NOTE(review): doxygen line 347 (the function name line of this
// copy-ctor-source-argument accessor) is missing.
348 FunctionArgList &Args) const override {
349 assert(!Args.empty() && "expected the arglist to not be empty!");
350 return Args.size() - 1;
351 }
352
353 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
354 StringRef GetDeletedVirtualCallName() override
355 { return "__cxa_deleted_virtual"; }
356
357 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
// NOTE(review): doxygen line 358 (start of the array-cookie initialization
// declaration; cf. the ARMCXXABI override below) is missing.
359 Address NewPtr,
360 llvm::Value *NumElements,
361 const CXXNewExpr *expr,
362 QualType ElementType) override;
363 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
364 Address allocPtr,
365 CharUnits cookieSize) override;
366
367 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
368 llvm::GlobalVariable *DeclPtr,
369 bool PerformInit) override;
// NOTE(review): doxygen line 370 (start of a global-dtor registration
// declaration; cf. the XLCXXABI override below) is missing.
371 llvm::FunctionCallee dtor,
372 llvm::Constant *addr) override;
373
374 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
375 llvm::Value *Val);
// NOTE(review): doxygen line 376 (start of a thread-local init-funcs
// declaration) is missing.
377 CodeGenModule &CGM,
378 ArrayRef<const VarDecl *> CXXThreadLocals,
379 ArrayRef<llvm::Function *> CXXThreadLocalInits,
380 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
381
382 bool usesThreadWrapperFunction(const VarDecl *VD) const override {
// NOTE(review): the body of this function (doxygen lines 383-384) is
// missing.
385 }
// NOTE(review): doxygen line 386 (start of a thread-local lvalue emission
// declaration) is missing.
387 QualType LValType) override;
388
389 bool NeedsVTTParameter(GlobalDecl GD) override;
390
391 llvm::Constant *
392 getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD);
393
394 /**************************** RTTI Uniqueness ******************************/
395
396protected:
397 /// Returns true if the ABI requires RTTI type_info objects to be unique
398 /// across a program.
399 virtual bool shouldRTTIBeUnique() const { return true; }
400
401public:
402 /// What sort of unique-RTTI behavior should we use?
403 enum RTTIUniquenessKind {
404 /// We are guaranteeing, or need to guarantee, that the RTTI string
405 /// is unique.
406 RUK_Unique,
407
408 /// We are not guaranteeing uniqueness for the RTTI string, so we
409 /// can demote to hidden visibility but must use string comparisons.
410 RUK_NonUniqueHidden,
411
412 /// We are not guaranteeing uniqueness for the RTTI string, so we
413 /// have to use string comparisons, but we also have to emit it with
414 /// non-hidden visibility.
415 RUK_NonUniqueVisible
416 };
417
418 /// Return the required visibility status for the given type and linkage in
419 /// the current ABI.
420 RTTIUniquenessKind
421 classifyRTTIUniqueness(QualType CanTy,
422 llvm::GlobalValue::LinkageTypes Linkage) const;
423 friend class ItaniumRTTIBuilder;
424
425 void emitCXXStructor(GlobalDecl GD) override;
426
427 std::pair<llvm::Value *, const CXXRecordDecl *>
// NOTE(review): doxygen line 428 (the LoadVTablePtr-style function name
// line) is missing.
429 const CXXRecordDecl *RD) override;
430
431 private:
432 llvm::Constant *
433 getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD);
434
// Scans the class's vtable layout for a used, inlined virtual function that
// has not yet been emitted as a definition in this module.
435 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
436 const auto &VtableLayout =
437 CGM.getItaniumVTableContext().getVTableLayout(RD);
438
439 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
440 // Skip empty slot.
441 if (!VtableComponent.isUsedFunctionPointerKind())
442 continue;
443
444 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
445 if (!Method->getCanonicalDecl()->isInlined())
446 continue;
447
448 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
449 auto *Entry = CGM.GetGlobalValue(Name);
450 // This checks if virtual inline function has already been emitted.
451 // Note that it is possible that this inline function would be emitted
452 // after trying to emit vtable speculatively. Because of this we do
453 // an extra pass after emitting all deferred vtables to find and emit
454 // these vtables opportunistically.
455 if (!Entry || Entry->isDeclaration())
456 return true;
457 }
458 return false;
459 }
460
// Returns true if any RTTI component, or any used-but-undefined virtual
// function, in the class's vtable layout has hidden visibility.
461 bool isVTableHidden(const CXXRecordDecl *RD) const {
462 const auto &VtableLayout =
463 CGM.getItaniumVTableContext().getVTableLayout(RD);
464
465 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
466 if (VtableComponent.isRTTIKind()) {
467 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
468 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
469 return true;
470 } else if (VtableComponent.isUsedFunctionPointerKind()) {
471 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
472 if (Method->getVisibility() == Visibility::HiddenVisibility &&
473 !Method->isDefined())
474 return true;
475 }
476 }
477 return false;
478 }
479};
480
/// ARM (AAPCS-flavored) variant of the Itanium C++ ABI: enables the ARM
/// member-pointer and guard-variable encodings, makes constructors and
/// destructors return 'this', and overrides array-cookie handling.
481class ARMCXXABI : public ItaniumCXXABI {
482public:
483 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
484 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
485 /*UseARMGuardVarABI=*/true) {}
486
// ARM ctors/dtors return 'this' (unlike generic Itanium, which returns void).
487 bool constructorsAndDestructorsReturnThis() const override { return true; }
488
489 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
490 QualType ResTy) override;
491
// ARM-specific array cookie: size and read/write hooks differ from the
// generic Itanium implementation.
492 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
493 Address InitializeArrayCookie(CodeGenFunction &CGF,
494 Address NewPtr,
495 llvm::Value *NumElements,
496 const CXXNewExpr *expr,
497 QualType ElementType) override;
498 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
499 CharUnits cookieSize) override;
500};
501
/// Apple's arm64 ABI: the ARM variant plus 32-bit virtual member-pointer
/// vtable offsets and relaxed RTTI uniqueness.
502class AppleARM64CXXABI : public ARMCXXABI {
503public:
504 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
// Only the low 32 bits of a virtual member function pointer's vtable
// offset are significant on this target (see Use32BitVTableOffsetABI use
// in EmitLoadOfMemberFunctionPointer).
505 Use32BitVTableOffsetABI = true;
506 }
507
508 // ARM64 libraries are prepared for non-unique RTTI.
509 bool shouldRTTIBeUnique() const override { return false; }
510};
511
/// Fuchsia's ABI: generic Itanium, except constructors and destructors
/// return 'this'.
512class FuchsiaCXXABI final : public ItaniumCXXABI {
513public:
514 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
515 : ItaniumCXXABI(CGM) {}
516
517private:
518 bool constructorsAndDestructorsReturnThis() const override { return true; }
519};
520
/// WebAssembly's ABI: uses the ARM member-pointer and guard-variable
/// encodings, ctors/dtors return 'this', and overrides exception-handling
/// entry points (catch begin, terminate-on-unexpected).
521class WebAssemblyCXXABI final : public ItaniumCXXABI {
522public:
523 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
524 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
525 /*UseARMGuardVarABI=*/true) {}
526 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
527 llvm::CallInst *
528 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
529 llvm::Value *Exn) override;
530
531private:
532 bool constructorsAndDestructorsReturnThis() const override { return true; }
// Wasm's typed call instructions cannot tolerate signature mismatches.
533 bool canCallMismatchedFunctionType() const override { return false; }
534};
535
/// IBM XL (AIX) ABI: generic Itanium, but global destruction is routed
/// through sinit/sterm finalizer functions instead of __cxa_atexit-style
/// registration.
536class XLCXXABI final : public ItaniumCXXABI {
537public:
538 explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
539 : ItaniumCXXABI(CGM) {}
540
541 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
542 llvm::FunctionCallee dtor,
543 llvm::Constant *addr) override;
544
545 bool useSinitAndSterm() const override { return true; }
546
547private:
// Helper that builds the sterm finalizer body for a single variable.
548 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
549 llvm::Constant *addr);
550};
551}
552
// NOTE(review): the enclosing function's signature line (doxygen 553 —
// presumably the CreateItaniumCXXABI factory taking a CodeGenModule) was
// dropped by the extraction; confirm against upstream. The body selects the
// concrete ABI subclass for the target's C++ ABI kind.
554 switch (CGM.getContext().getCXXABIKind()) {
555 // For IR-generation purposes, there's no significant difference
556 // between the ARM and iOS ABIs.
557 case TargetCXXABI::GenericARM:
558 case TargetCXXABI::iOS:
559 case TargetCXXABI::WatchOS:
560 return new ARMCXXABI(CGM);
561
562 case TargetCXXABI::AppleARM64:
563 return new AppleARM64CXXABI(CGM);
564
565 case TargetCXXABI::Fuchsia:
566 return new FuchsiaCXXABI(CGM);
567
568 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
569 // include the other 32-bit ARM oddities: constructor/destructor return values
570 // and array cookies.
571 case TargetCXXABI::GenericAArch64:
572 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
573 /*UseARMGuardVarABI=*/true);
574
575 case TargetCXXABI::GenericMIPS:
576 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
577
578 case TargetCXXABI::WebAssembly:
579 return new WebAssemblyCXXABI(CGM);
580
581 case TargetCXXABI::XL:
582 return new XLCXXABI(CGM);
583
584 case TargetCXXABI::GenericItanium:
585 return new ItaniumCXXABI(CGM);
586
587 case TargetCXXABI::Microsoft:
588 llvm_unreachable("Microsoft ABI is not Itanium-based");
589 }
590 llvm_unreachable("bad ABI kind");
591}
592
593llvm::Type *
594ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
595 if (MPT->isMemberDataPointer())
596 return CGM.PtrDiffTy;
597 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
598}
599
600/// In the Itanium and ARM ABIs, method pointers have the form:
601/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
602///
603/// In the Itanium ABI:
604/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
605/// - the this-adjustment is (memptr.adj)
606/// - the virtual offset is (memptr.ptr - 1)
607///
608/// In the ARM ABI:
609/// - method pointers are virtual if (memptr.adj & 1) is nonzero
610/// - the this-adjustment is (memptr.adj >> 1)
611/// - the virtual offset is (memptr.ptr)
612/// ARM uses 'adj' for the virtual flag because Thumb functions
613/// may be only single-byte aligned.
614///
615/// If the member is virtual, the adjusted 'this' pointer points
616/// to a vtable pointer from which the virtual offset is applied.
617///
618/// If the member is non-virtual, memptr.ptr is the address of
619/// the function to call.
620CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
621 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
622 llvm::Value *&ThisPtrForCall,
623 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
624 CGBuilderTy &Builder = CGF.Builder;
625
626 const FunctionProtoType *FPT =
// NOTE(review): the initializer for FPT (doxygen line 627) was dropped by
// the extraction — confirm against upstream.
628 auto *RD =
629 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
630
631 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
632
// Three-way control flow: virtual dispatch path, direct-call path, and the
// join block where the callee pointer is PHI'd together.
633 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
634 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
635 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
636
637 // Extract memptr.adj, which is in the second field.
638 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
639
640 // Compute the true adjustment.
// On ARM the low bit of 'adj' is the virtual flag, so the real adjustment
// is adj >> 1 (see the comment block above).
641 llvm::Value *Adj = RawAdj;
642 if (UseARMMethodPtrABI)
643 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
644
645 // Apply the adjustment and cast back to the original struct type
646 // for consistency.
647 llvm::Value *This = ThisAddr.emitRawPointer(CGF);
648 This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj);
649 ThisPtrForCall = This;
650
651 // Load the function pointer.
652 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
653
654 // If the LSB in the function pointer is 1, the function pointer points to
655 // a virtual function.
656 llvm::Value *IsVirtual;
657 if (UseARMMethodPtrABI)
658 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
659 else
660 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
661 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
662 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
663
664 // In the virtual path, the adjustment left 'This' pointing to the
665 // vtable of the correct base subobject. The "function pointer" is an
666 // offset within the vtable (+1 for the virtual flag on non-ARM).
667 CGF.EmitBlock(FnVirtual);
668
669 // Cast the adjusted this to a pointer to vtable pointer and load.
670 llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
671 CharUnits VTablePtrAlign =
672 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
673 CGF.getPointerAlign());
674 llvm::Value *VTable = CGF.GetVTablePtr(
675 Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
676
677 // Apply the offset.
678 // On ARM64, to reserve extra space in virtual member function pointers,
679 // we only pay attention to the low 32 bits of the offset.
680 llvm::Value *VTableOffset = FnAsInt;
681 if (!UseARMMethodPtrABI)
682 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
683 if (Use32BitVTableOffsetABI) {
684 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
685 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
686 }
687
688 // Check the address of the function pointer if CFI on member function
689 // pointers is enabled.
690 llvm::Constant *CheckSourceLocation;
691 llvm::Constant *CheckTypeDesc;
// CFI-mfcall sanitizer, virtual function elimination (VFE), and whole
// program devirtualization (WPD) each need type metadata on the vtable load.
692 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
693 CGM.HasHiddenLTOVisibility(RD);
694 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
695 CGM.HasHiddenLTOVisibility(RD);
696 bool ShouldEmitWPDInfo =
697 CGM.getCodeGenOpts().WholeProgramVTables &&
698 // Don't insert type tests if we are forcing public visibility.
699 !CGM.AlwaysHasLTOVisibilityPublic(RD);
700 llvm::Value *VirtualFn = nullptr;
701
702 {
703 CodeGenFunction::SanitizerScope SanScope(&CGF);
704 llvm::Value *TypeId = nullptr;
705 llvm::Value *CheckResult = nullptr;
706
707 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
708 // If doing CFI, VFE or WPD, we will need the metadata node to check
709 // against.
710 llvm::Metadata *MD =
711 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
712 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
713 }
714
715 if (ShouldEmitVFEInfo) {
716 llvm::Value *VFPAddr =
717 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
718
719 // If doing VFE, load from the vtable with a type.checked.load intrinsic
720 // call. Note that we use the GEP to calculate the address to load from
721 // and pass 0 as the offset to the intrinsic. This is because every
722 // vtable slot of the correct type is marked with matching metadata, and
723 // we know that the load must be from one of these slots.
724 llvm::Value *CheckedLoad = Builder.CreateCall(
725 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
726 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
727 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
728 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
729 } else {
730 // When not doing VFE, emit a normal load, as it allows more
731 // optimisations than type.checked.load.
732 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
733 llvm::Value *VFPAddr =
734 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
735 llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
736 ? llvm::Intrinsic::type_test
737 : llvm::Intrinsic::public_type_test;
738
739 CheckResult =
740 Builder.CreateCall(CGM.getIntrinsic(IID), {VFPAddr, TypeId});
741 }
742
743 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
744 VirtualFn = CGF.Builder.CreateCall(
745 CGM.getIntrinsic(llvm::Intrinsic::load_relative,
746 {VTableOffset->getType()}),
747 {VTable, VTableOffset});
748 } else {
749 llvm::Value *VFPAddr =
750 CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
751 VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
752 CGF.getPointerAlign(),
753 "memptr.virtualfn");
754 }
755 }
// NOTE(review): "fuction" typo in this assert message (cosmetic; a string
// literal, so left untouched here — fix upstream).
756 assert(VirtualFn && "Virtual fuction pointer not created!");
757 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
758 CheckResult) &&
759 "Check result required but not created!");
760
761 if (ShouldEmitCFICheck) {
762 // If doing CFI, emit the check.
763 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
764 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
765 llvm::Constant *StaticData[] = {
766 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
767 CheckSourceLocation,
768 CheckTypeDesc,
769 };
770
771 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
772 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
773 } else {
774 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
775 CGM.getLLVMContext(),
776 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
777 llvm::Value *ValidVtable = Builder.CreateCall(
778 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
779 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
780 SanitizerHandler::CFICheckFail, StaticData,
781 {VTable, ValidVtable});
782 }
783
// The check may have split the block; the PHI below must see the block we
// actually branch to FnEnd from.
784 FnVirtual = Builder.GetInsertBlock();
785 }
786 } // End of sanitizer scope
787
788 CGF.EmitBranch(FnEnd);
789
790 // In the non-virtual path, the function pointer is actually a
791 // function pointer.
792 CGF.EmitBlock(FnNonVirtual);
793 llvm::Value *NonVirtualFn =
794 Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
795
796 // Check the function pointer if CFI on member function pointers is enabled.
797 if (ShouldEmitCFICheck) {
// NOTE(review): doxygen line 798 was dropped by the extraction here —
// confirm against upstream.
799 if (RD->hasDefinition()) {
800 CodeGenFunction::SanitizerScope SanScope(&CGF);
801
802 llvm::Constant *StaticData[] = {
803 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
804 CheckSourceLocation,
805 CheckTypeDesc,
806 };
807
// OR together a type_test against every most-base class: the target may be
// a member of any base whose member pointer converts to this type.
808 llvm::Value *Bit = Builder.getFalse();
809 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
810 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
811 getContext().getMemberPointerType(
812 MPT->getPointeeType(),
813 getContext().getRecordType(Base).getTypePtr()));
814 llvm::Value *TypeId =
815 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
816
817 llvm::Value *TypeTest =
818 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
819 {NonVirtualFn, TypeId});
820 Bit = Builder.CreateOr(Bit, TypeTest);
821 }
822
823 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
824 SanitizerHandler::CFICheckFail, StaticData,
825 {NonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
826
827 FnNonVirtual = Builder.GetInsertBlock();
828 }
829 }
830
831 // We're done.
832 CGF.EmitBlock(FnEnd);
833 llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
834 CalleePtr->addIncoming(VirtualFn, FnVirtual);
835 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
836
837 CGPointerAuthInfo PointerAuth;
838
// If pointer authentication of C++ member function pointers is enabled, the
// discriminator differs between the virtual (0) and non-virtual paths, so it
// is PHI'd alongside the callee pointer.
839 if (const auto &Schema =
840 CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers) {
841 llvm::PHINode *DiscriminatorPHI = Builder.CreatePHI(CGF.IntPtrTy, 2);
842 DiscriminatorPHI->addIncoming(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
843 FnVirtual);
844 const auto &AuthInfo =
845 CGM.getMemberFunctionPointerAuthInfo(QualType(MPT, 0));
846 assert(Schema.getKey() == AuthInfo.getKey() &&
847 "Keys for virtual and non-virtual member functions must match");
848 auto *NonVirtualDiscriminator = AuthInfo.getDiscriminator();
849 DiscriminatorPHI->addIncoming(NonVirtualDiscriminator, FnNonVirtual);
850 PointerAuth = CGPointerAuthInfo(
851 Schema.getKey(), Schema.getAuthenticationMode(), Schema.isIsaPointer(),
852 Schema.authenticatesNullValues(), DiscriminatorPHI);
853 }
854
855 CGCallee Callee(FPT, CalleePtr, PointerAuth);
856 return Callee;
857}
858
859/// Compute an l-value by applying the given pointer-to-member to a
860/// base object.
861llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
862 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
863 const MemberPointerType *MPT) {
864 assert(MemPtr->getType() == CGM.PtrDiffTy);
865
866 CGBuilderTy &Builder = CGF.Builder;
867
868 // Apply the offset, which we assume is non-null.
869 return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.emitRawPointer(CGF), MemPtr,
870 "memptr.offset");
871}
872
873// See if it's possible to return a constant signed pointer.
874static llvm::Constant *pointerAuthResignConstant(
875 llvm::Value *Ptr, const CGPointerAuthInfo &CurAuthInfo,
876 const CGPointerAuthInfo &NewAuthInfo, CodeGenModule &CGM) {
877 const auto *CPA = dyn_cast<llvm::ConstantPtrAuth>(Ptr);
878
879 if (!CPA)
880 return nullptr;
881
882 assert(CPA->getKey()->getZExtValue() == CurAuthInfo.getKey() &&
883 CPA->getAddrDiscriminator()->isZeroValue() &&
884 CPA->getDiscriminator() == CurAuthInfo.getDiscriminator() &&
885 "unexpected key or discriminators");
886
887 return CGM.getConstantSignedPointer(
888 CPA->getPointer(), NewAuthInfo.getKey(), nullptr,
889 cast<llvm::ConstantInt>(NewAuthInfo.getDiscriminator()));
890}
891
892/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
893/// conversion.
894///
895/// Bitcast conversions are always a no-op under Itanium.
896///
897/// Obligatory offset/adjustment diagram:
898/// <-- offset --> <-- adjustment -->
899/// |--------------------------|----------------------|--------------------|
900/// ^Derived address point ^Base address point ^Member address point
901///
902/// So when converting a base member pointer to a derived member pointer,
903/// we add the offset to the adjustment because the address point has
904/// decreased; and conversely, when converting a derived MP to a base MP
905/// we subtract the offset from the adjustment because the address point
906/// has increased.
907///
908/// The standard forbids (at compile time) conversion to and from
909/// virtual bases, which is why we don't have to consider them here.
910///
911/// The standard forbids (at run time) casting a derived MP to a base
912/// MP when the derived MP does not point to a member of the base.
913/// This is why -1 is a reasonable choice for null data member
914/// pointers.
915llvm::Value *
916ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
917 const CastExpr *E,
918 llvm::Value *src) {
919 // Use constant emission if we can.
920 if (isa<llvm::Constant>(src))
921 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
922
923 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
924 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
925 E->getCastKind() == CK_ReinterpretMemberPointer);
926
927 CGBuilderTy &Builder = CGF.Builder;
928 QualType DstType = E->getType();
929
930 if (DstType->isMemberFunctionPointerType()) {
931 if (const auto &NewAuthInfo =
932 CGM.getMemberFunctionPointerAuthInfo(DstType)) {
933 QualType SrcType = E->getSubExpr()->getType();
934 assert(SrcType->isMemberFunctionPointerType());
935 const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType);
936 llvm::Value *MemFnPtr = Builder.CreateExtractValue(src, 0, "memptr.ptr");
937 llvm::Type *OrigTy = MemFnPtr->getType();
938
939 llvm::BasicBlock *StartBB = Builder.GetInsertBlock();
940 llvm::BasicBlock *ResignBB = CGF.createBasicBlock("resign");
941 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("merge");
942
943 // Check whether we have a virtual offset or a pointer to a function.
944 assert(UseARMMethodPtrABI && "ARM ABI expected");
945 llvm::Value *Adj = Builder.CreateExtractValue(src, 1, "memptr.adj");
946 llvm::Constant *Ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
947 llvm::Value *AndVal = Builder.CreateAnd(Adj, Ptrdiff_1);
948 llvm::Value *IsVirtualOffset =
949 Builder.CreateIsNotNull(AndVal, "is.virtual.offset");
950 Builder.CreateCondBr(IsVirtualOffset, MergeBB, ResignBB);
951
952 CGF.EmitBlock(ResignBB);
953 llvm::Type *PtrTy = llvm::PointerType::getUnqual(CGM.Int8Ty);
954 MemFnPtr = Builder.CreateIntToPtr(MemFnPtr, PtrTy);
955 MemFnPtr =
956 CGF.emitPointerAuthResign(MemFnPtr, SrcType, CurAuthInfo, NewAuthInfo,
957 isa<llvm::Constant>(src));
958 MemFnPtr = Builder.CreatePtrToInt(MemFnPtr, OrigTy);
959 llvm::Value *ResignedVal = Builder.CreateInsertValue(src, MemFnPtr, 0);
960 ResignBB = Builder.GetInsertBlock();
961
962 CGF.EmitBlock(MergeBB);
963 llvm::PHINode *NewSrc = Builder.CreatePHI(src->getType(), 2);
964 NewSrc->addIncoming(src, StartBB);
965 NewSrc->addIncoming(ResignedVal, ResignBB);
966 src = NewSrc;
967 }
968 }
969
970 // Under Itanium, reinterprets don't require any additional processing.
971 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
972
973 llvm::Constant *adj = getMemberPointerAdjustment(E);
974 if (!adj) return src;
975
976 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
977
978 const MemberPointerType *destTy =
980
981 // For member data pointers, this is just a matter of adding the
982 // offset if the source is non-null.
983 if (destTy->isMemberDataPointer()) {
984 llvm::Value *dst;
985 if (isDerivedToBase)
986 dst = Builder.CreateNSWSub(src, adj, "adj");
987 else
988 dst = Builder.CreateNSWAdd(src, adj, "adj");
989
990 // Null check.
991 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
992 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
993 return Builder.CreateSelect(isNull, src, dst);
994 }
995
996 // The this-adjustment is left-shifted by 1 on ARM.
997 if (UseARMMethodPtrABI) {
998 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
999 offset <<= 1;
1000 adj = llvm::ConstantInt::get(adj->getType(), offset);
1001 }
1002
1003 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
1004 llvm::Value *dstAdj;
1005 if (isDerivedToBase)
1006 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
1007 else
1008 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
1009
1010 return Builder.CreateInsertValue(src, dstAdj, 1);
1011}
1012
1013static llvm::Constant *
1015 QualType SrcType, CodeGenModule &CGM) {
1016 assert(DestType->isMemberFunctionPointerType() &&
1017 SrcType->isMemberFunctionPointerType() &&
1018 "member function pointers expected");
1019 if (DestType == SrcType)
1020 return Src;
1021
1022 const auto &NewAuthInfo = CGM.getMemberFunctionPointerAuthInfo(DestType);
1023 const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType);
1024
1025 if (!NewAuthInfo && !CurAuthInfo)
1026 return Src;
1027
1028 llvm::Constant *MemFnPtr = Src->getAggregateElement(0u);
1029 if (MemFnPtr->getNumOperands() == 0) {
1030 // src must be a pair of null pointers.
1031 assert(isa<llvm::ConstantInt>(MemFnPtr) && "constant int expected");
1032 return Src;
1033 }
1034
1035 llvm::Constant *ConstPtr = pointerAuthResignConstant(
1036 cast<llvm::User>(MemFnPtr)->getOperand(0), CurAuthInfo, NewAuthInfo, CGM);
1037 ConstPtr = llvm::ConstantExpr::getPtrToInt(ConstPtr, MemFnPtr->getType());
1038 return ConstantFoldInsertValueInstruction(Src, ConstPtr, 0);
1039}
1040
1041llvm::Constant *
1042ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
1043 llvm::Constant *src) {
1044 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
1045 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
1046 E->getCastKind() == CK_ReinterpretMemberPointer);
1047
1048 QualType DstType = E->getType();
1049
1050 if (DstType->isMemberFunctionPointerType())
1052 src, DstType, E->getSubExpr()->getType(), CGM);
1053
1054 // Under Itanium, reinterprets don't require any additional processing.
1055 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
1056
1057 // If the adjustment is trivial, we don't need to do anything.
1058 llvm::Constant *adj = getMemberPointerAdjustment(E);
1059 if (!adj) return src;
1060
1061 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
1062
1063 const MemberPointerType *destTy =
1065
1066 // For member data pointers, this is just a matter of adding the
1067 // offset if the source is non-null.
1068 if (destTy->isMemberDataPointer()) {
1069 // null maps to null.
1070 if (src->isAllOnesValue()) return src;
1071
1072 if (isDerivedToBase)
1073 return llvm::ConstantExpr::getNSWSub(src, adj);
1074 else
1075 return llvm::ConstantExpr::getNSWAdd(src, adj);
1076 }
1077
1078 // The this-adjustment is left-shifted by 1 on ARM.
1079 if (UseARMMethodPtrABI) {
1080 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
1081 offset <<= 1;
1082 adj = llvm::ConstantInt::get(adj->getType(), offset);
1083 }
1084
1085 llvm::Constant *srcAdj = src->getAggregateElement(1);
1086 llvm::Constant *dstAdj;
1087 if (isDerivedToBase)
1088 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
1089 else
1090 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
1091
1092 llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
1093 assert(res != nullptr && "Folding must succeed");
1094 return res;
1095}
1096
1097llvm::Constant *
1098ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
1099 // Itanium C++ ABI 2.3:
1100 // A NULL pointer is represented as -1.
1101 if (MPT->isMemberDataPointer())
1102 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
1103
1104 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1105 llvm::Constant *Values[2] = { Zero, Zero };
1106 return llvm::ConstantStruct::getAnon(Values);
1107}
1108
llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  // Note: MPT is unused here because every data member pointer shares this
  // ptrdiff_t representation regardless of the class or field type.
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}
1117
llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  // A plain &Class::method expression has no derived-to-base path, so the
  // this-adjustment is zero.
  return BuildMemberPointer(MD, CharUnits::Zero());
}
1122
1123llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
1125 assert(MD->isInstance() && "Member function must not be static!");
1126
1127 CodeGenTypes &Types = CGM.getTypes();
1128
1129 // Get the function pointer (or index if this is a virtual function).
1130 llvm::Constant *MemPtr[2];
1131 if (MD->isVirtual()) {
1132 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1133 uint64_t VTableOffset;
1134 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1135 // Multiply by 4-byte relative offsets.
1136 VTableOffset = Index * 4;
1137 } else {
1138 const ASTContext &Context = getContext();
1139 CharUnits PointerWidth = Context.toCharUnitsFromBits(
1140 Context.getTargetInfo().getPointerWidth(LangAS::Default));
1141 VTableOffset = Index * PointerWidth.getQuantity();
1142 }
1143
1144 if (UseARMMethodPtrABI) {
1145 // ARM C++ ABI 3.2.1:
1146 // This ABI specifies that adj contains twice the this
1147 // adjustment, plus 1 if the member function is virtual. The
1148 // least significant bit of adj then makes exactly the same
1149 // discrimination as the least significant bit of ptr does for
1150 // Itanium.
1151
1152 // We cannot use the Itanium ABI's representation for virtual member
1153 // function pointers under pointer authentication because it would
1154 // require us to store both the virtual offset and the constant
1155 // discriminator in the pointer, which would be immediately vulnerable
1156 // to attack. Instead we introduce a thunk that does the virtual dispatch
1157 // and store it as if it were a non-virtual member function. This means
1158 // that virtual function pointers may not compare equal anymore, but
1159 // fortunately they aren't required to by the standard, and we do make
1160 // a best-effort attempt to re-use the thunk.
1161 //
1162 // To support interoperation with code in which pointer authentication
1163 // is disabled, derefencing a member function pointer must still handle
1164 // the virtual case, but it can use a discriminator which should never
1165 // be valid.
1166 const auto &Schema =
1167 CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers;
1168 if (Schema)
1169 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(
1170 getSignedVirtualMemberFunctionPointer(MD), CGM.PtrDiffTy);
1171 else
1172 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1173 // Don't set the LSB of adj to 1 if pointer authentication for member
1174 // function pointers is enabled.
1175 MemPtr[1] = llvm::ConstantInt::get(
1176 CGM.PtrDiffTy, 2 * ThisAdjustment.getQuantity() + !Schema);
1177 } else {
1178 // Itanium C++ ABI 2.3:
1179 // For a virtual function, [the pointer field] is 1 plus the
1180 // virtual table offset (in bytes) of the function,
1181 // represented as a ptrdiff_t.
1182 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1183 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1184 ThisAdjustment.getQuantity());
1185 }
1186 } else {
1187 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1188 llvm::Type *Ty;
1189 // Check whether the function has a computable LLVM signature.
1190 if (Types.isFuncTypeConvertible(FPT)) {
1191 // The function has a computable LLVM signature; use the correct type.
1192 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1193 } else {
1194 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1195 // function type is incomplete.
1196 Ty = CGM.PtrDiffTy;
1197 }
1198 llvm::Constant *addr = CGM.getMemberFunctionPointer(MD, Ty);
1199
1200 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1201 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1202 (UseARMMethodPtrABI ? 2 : 1) *
1203 ThisAdjustment.getQuantity());
1204 }
1205
1206 return llvm::ConstantStruct::getAnon(MemPtr);
1207}
1208
1209llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1210 QualType MPType) {
1211 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1212 const ValueDecl *MPD = MP.getMemberPointerDecl();
1213 if (!MPD)
1214 return EmitNullMemberPointer(MPT);
1215
1216 CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1217
1218 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) {
1219 llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment);
1220 QualType SrcType = getContext().getMemberPointerType(
1221 MD->getType(), MD->getParent()->getTypeForDecl());
1222 return pointerAuthResignMemberFunctionPointer(Src, MPType, SrcType, CGM);
1223 }
1224
1225 CharUnits FieldOffset =
1226 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1227 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1228}
1229
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // Pick the predicate and connectives up front; for inequality we emit the
  // De Morgan dual of the equality expression, swapping and/or.
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr. This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null. ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj. If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}
1307
1308llvm::Value *
1309ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1310 llvm::Value *MemPtr,
1311 const MemberPointerType *MPT) {
1312 CGBuilderTy &Builder = CGF.Builder;
1313
1314 /// For member data pointers, this is just a check against -1.
1315 if (MPT->isMemberDataPointer()) {
1316 assert(MemPtr->getType() == CGM.PtrDiffTy);
1317 llvm::Value *NegativeOne =
1318 llvm::Constant::getAllOnesValue(MemPtr->getType());
1319 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1320 }
1321
1322 // In Itanium, a member function pointer is not null if 'ptr' is not null.
1323 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1324
1325 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1326 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1327
1328 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1329 // (the virtual bit) is set.
1330 if (UseARMMethodPtrABI) {
1331 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1332 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1333 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1334 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1335 "memptr.isvirtual");
1336 Result = Builder.CreateOr(Result, IsVirtual);
1337 }
1338
1339 return Result;
1340}
1341
1342bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1343 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1344 if (!RD)
1345 return false;
1346
1347 // If C++ prohibits us from making a copy, return by address.
1348 if (!RD->canPassInRegisters()) {
1349 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1350 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1351 return true;
1352 }
1353 return false;
1354}
1355
/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  // A member function pointer is null when ptr == 0, so all-zero memory is a
  // valid null value; data member pointers use -1 for null and therefore are
  // NOT zero-initializable.
  return MPT->isMemberFunctionPointer();
}
1361
/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
///
/// Emits a virtual destructor call for `delete ptr`, and — when the global
/// operator delete is used — derives the complete-object pointer from the
/// vtable's offset-to-top entry and schedules the deallocation as a cleanup
/// so it runs even if the destructor throws.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,
                                                        CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr = Ptr.emitRawPointer(CGF);
    CompletePtr =
        CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // The deleting destructor performs the deallocation itself; with a global
  // delete we call the complete destructor and deallocate via the cleanup.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}
1404
1405void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1406 // void __cxa_rethrow();
1407
1408 llvm::FunctionType *FTy =
1409 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1410
1411 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1412
1413 if (isNoReturn)
1414 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, std::nullopt);
1415 else
1417}
1418
1419static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1420 // void *__cxa_allocate_exception(size_t thrown_size);
1421
1422 llvm::FunctionType *FTy =
1423 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1424
1425 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1426}
1427
1428static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1429 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1430 // void (*dest) (void *));
1431
1432 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
1433 llvm::FunctionType *FTy =
1434 llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1435
1436 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1437}
1438
1439void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1440 QualType ThrowType = E->getSubExpr()->getType();
1441 // Now allocate the exception object.
1442 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1443 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1444
1445 llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1446 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1447 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1448
1449 CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1450 CGF.EmitAnyExprToExn(
1451 E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
1452
1453 // Now throw the exception.
1454 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1455 /*ForEH=*/true);
1456
1457 // The address of the destructor. If the exception type has a
1458 // trivial destructor (or isn't a record), we just pass null.
1459 llvm::Constant *Dtor = nullptr;
1460 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1461 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1462 if (!Record->hasTrivialDestructor()) {
1463 // __cxa_throw is declared to take its destructor as void (*)(void *). We
1464 // must match that if function pointers can be authenticated with a
1465 // discriminator based on their type.
1466 const ASTContext &Ctx = getContext();
1467 QualType DtorTy = Ctx.getFunctionType(Ctx.VoidTy, {Ctx.VoidPtrTy},
1469
1470 CXXDestructorDecl *DtorD = Record->getDestructor();
1471 Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1472 Dtor = CGM.getFunctionPointer(Dtor, DtorTy);
1473 }
1474 }
1475 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1476
1477 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1479}
1480
1481static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1482 // void *__dynamic_cast(const void *sub,
1483 // global_as const abi::__class_type_info *src,
1484 // global_as const abi::__class_type_info *dst,
1485 // std::ptrdiff_t src2dst_offset);
1486
1487 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1488 llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy;
1489 llvm::Type *PtrDiffTy =
1491
1492 llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy };
1493
1494 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1495
1496 // Mark the function as nounwind willreturn readonly.
1497 llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
1498 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1499 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
1500 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
1501 llvm::AttributeList Attrs = llvm::AttributeList::get(
1502 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1503
1504 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1505}
1506
1507static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1508 // void __cxa_bad_cast();
1509 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1510 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1511}
1512
1513/// Compute the src2dst_offset hint as described in the
1514/// Itanium C++ ABI [2.9.7]
1516 const CXXRecordDecl *Src,
1517 const CXXRecordDecl *Dst) {
1518 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1519 /*DetectVirtual=*/false);
1520
1521 // If Dst is not derived from Src we can skip the whole computation below and
1522 // return that Src is not a public base of Dst. Record all inheritance paths.
1523 if (!Dst->isDerivedFrom(Src, Paths))
1524 return CharUnits::fromQuantity(-2ULL);
1525
1526 unsigned NumPublicPaths = 0;
1527 CharUnits Offset;
1528
1529 // Now walk all possible inheritance paths.
1530 for (const CXXBasePath &Path : Paths) {
1531 if (Path.Access != AS_public) // Ignore non-public inheritance.
1532 continue;
1533
1534 ++NumPublicPaths;
1535
1536 for (const CXXBasePathElement &PathElement : Path) {
1537 // If the path contains a virtual base class we can't give any hint.
1538 // -1: no hint.
1539 if (PathElement.Base->isVirtual())
1540 return CharUnits::fromQuantity(-1ULL);
1541
1542 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1543 continue;
1544
1545 // Accumulate the base class offsets.
1546 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1547 Offset += L.getBaseClassOffset(
1548 PathElement.Base->getType()->getAsCXXRecordDecl());
1549 }
1550 }
1551
1552 // -2: Src is not a public base of Dst.
1553 if (NumPublicPaths == 0)
1554 return CharUnits::fromQuantity(-2ULL);
1555
1556 // -3: Src is a multiple public base type but never a virtual base type.
1557 if (NumPublicPaths > 1)
1558 return CharUnits::fromQuantity(-3ULL);
1559
1560 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1561 // Return the offset of Src from the origin of Dst.
1562 return Offset;
1563}
1564
1565static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1566 // void __cxa_bad_typeid();
1567 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1568
1569 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1570}
1571
bool ItaniumCXXABI::shouldTypeidBeNullChecked(QualType SrcRecordTy) {
  // Under the Itanium ABI a typeid on a null glvalue must reach
  // __cxa_bad_typeid (see EmitBadTypeidCall), so always request the check.
  return true;
}
1575
1576void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1577 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1578 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1579 Call->setDoesNotReturn();
1580 CGF.Builder.CreateUnreachable();
1581}
1582
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  // Load the object's vtable pointer; the std::type_info pointer lives just
  // before the address point (vtable slot -1, or -4 bytes in the relative
  // layout).
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy,
                                        ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info via llvm.load.relative at byte offset -4.
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
  } else {
    // Load the type info from vtable entry -1.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
                                       CGF.getPointerAlign());
}
1605
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  // dynamic_cast of a null pointer yields null without entering the runtime;
  // references can never be null, so only pointer casts need the check.
  return SrcIsPtr;
}
1610
1611llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
1612 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1613 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1614 llvm::Type *PtrDiffLTy =
1616
1617 llvm::Value *SrcRTTI =
1619 llvm::Value *DestRTTI =
1621
1622 // Compute the offset hint.
1623 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1624 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1625 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1626 PtrDiffLTy,
1627 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1628
1629 // Emit the call to __dynamic_cast.
1630 llvm::Value *Value = ThisAddr.emitRawPointer(CGF);
1631 if (CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) {
1632 // We perform a no-op load of the vtable pointer here to force an
1633 // authentication. In environments that do not support pointer
1634 // authentication this is a an actual no-op that will be elided. When
1635 // pointer authentication is supported and enforced on vtable pointers this
1636 // load can trap.
1637 llvm::Value *Vtable =
1638 CGF.GetVTablePtr(ThisAddr, CGM.Int8PtrTy, SrcDecl,
1639 CodeGenFunction::VTableAuthMode::MustTrap);
1640 assert(Vtable);
1641 (void)Vtable;
1642 }
1643
1644 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1646
1647 /// C++ [expr.dynamic.cast]p9:
1648 /// A failed cast to reference type throws std::bad_cast
1649 if (DestTy->isReferenceType()) {
1650 llvm::BasicBlock *BadCastBlock =
1651 CGF.createBasicBlock("dynamic_cast.bad_cast");
1652
1653 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1654 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1655
1656 CGF.EmitBlock(BadCastBlock);
1657 EmitBadCastCall(CGF);
1658 }
1659
1660 return Value;
1661}
1662
/// Emit an "exact" dynamic_cast: when the destination class is effectively
/// final, the cast succeeds iff the object's vptr equals the expected vtable
/// address point for a SrcDecl subobject at a statically known offset within
/// DestDecl, so no runtime call is needed.
llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
    llvm::BasicBlock *CastFail) {
  ASTContext &Context = getContext();

  // Find all the inheritance paths.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);
  (void)DestDecl->isDerivedFrom(SrcDecl, Paths);

  // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
  // might appear.
  std::optional<CharUnits> Offset;
  for (const CXXBasePath &Path : Paths) {
    // dynamic_cast only finds public inheritance paths.
    if (Path.Access != AS_public)
      continue;

    CharUnits PathOffset;
    for (const CXXBasePathElement &PathElement : Path) {
      // Find the offset along this inheritance step.
      const CXXRecordDecl *Base =
          PathElement.Base->getType()->getAsCXXRecordDecl();
      if (PathElement.Base->isVirtual()) {
        // For a virtual base class, we know that the derived class is exactly
        // DestDecl, so we can use the vbase offset from its layout.
        const ASTRecordLayout &L = Context.getASTRecordLayout(DestDecl);
        PathOffset = L.getVBaseClassOffset(Base);
      } else {
        const ASTRecordLayout &L =
            Context.getASTRecordLayout(PathElement.Class);
        PathOffset += L.getBaseClassOffset(Base);
      }
    }

    if (!Offset)
      Offset = PathOffset;
    else if (Offset != PathOffset) {
      // Base appears in at least two different places. Find the most-derived
      // object and see if it's a DestDecl. Note that the most-derived object
      // must be at least as aligned as this base class subobject, and must
      // have a vptr at offset 0.
      ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy),
                         CGF.VoidPtrTy, ThisAddr.getAlignment());
      SrcDecl = DestDecl;
      Offset = CharUnits::Zero();
      break;
    }
  }

  if (!Offset) {
    // If there are no public inheritance paths, the cast always fails.
    CGF.EmitBranch(CastFail);
    return llvm::PoisonValue::get(CGF.VoidPtrTy);
  }

  // Compare the vptr against the expected vptr for the destination type at
  // this offset. Note that we do not know what type ThisAddr points to in
  // the case where the derived class multiply inherits from the base class
  // so we can't use GetVTablePtr, so we load the vptr directly instead.
  llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
      ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable");
  CGM.DecorateInstructionWithTBAA(
      VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
  llvm::Value *Success = CGF.Builder.CreateICmpEQ(
      VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
  // On success, the result pointer is ThisAddr adjusted back down by the
  // base-subobject offset (derived-to-most-derived adjustment).
  llvm::Value *Result = ThisAddr.emitRawPointer(CGF);
  if (!Offset->isZero())
    Result = CGF.Builder.CreateInBoundsGEP(
        CGF.CharTy, Result,
        {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())});
  CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail);
  return Result;
}
1740
// Implements dynamic_cast<void*>(p): adjust p to point at the most-derived
// (complete) object by adding the vtable's offset-to-top entry, which lives
// two slots before the vtable address point.
llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);

    // Get the offset-to-top from the vtable: relative layout stores it as a
    // 32-bit entry at index -2.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    // NOTE(review): the initializer for PtrDiffLTy was dropped by this
    // listing; upstream it is the memory type of the pointer-diff type —
    // confirm against upstream before building.
    llvm::Type *PtrDiffLTy =

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);

    // Get the offset-to-top from the vtable: classic layout stores it as a
    // ptrdiff_t entry at index -2.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer to reach the complete object.
  return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.emitRawPointer(CGF),
                                       OffsetToTop);
}
1775
1776bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1777 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1778 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1779 Call->setDoesNotReturn();
1780 CGF.Builder.CreateUnreachable();
1781 return true;
1782}
1783
1784llvm::Value *
1785ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1786 Address This,
1787 const CXXRecordDecl *ClassDecl,
1788 const CXXRecordDecl *BaseClassDecl) {
1789 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1790 CharUnits VBaseOffsetOffset =
1791 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1792 BaseClassDecl);
1793 llvm::Value *VBaseOffsetPtr =
1794 CGF.Builder.CreateConstGEP1_64(
1795 CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
1796 "vbase.offset.ptr");
1797
1798 llvm::Value *VBaseOffset;
1799 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1800 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1801 CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
1802 "vbase.offset");
1803 } else {
1804 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1805 CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
1806 }
1807 return VBaseOffset;
1808}
1809
1810void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1811 // Just make sure we're in sync with TargetCXXABI.
1812 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1813
1814 // The constructor used for constructing this as a base class;
1815 // ignores virtual bases.
1816 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1817
1818 // The constructor used for constructing this as a complete class;
1819 // constructs the virtual bases, then calls the base constructor.
1820 if (!D->getParent()->isAbstract()) {
1821 // We don't need to emit the complete ctor if the class is abstract.
1822 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1823 }
1824}
1825
// Add the ABI-specific implicit parameters to a structor's signature.
// NOTE(review): this listing drops the return-type line (upstream:
// `CGCXXABI::AddedStructorArgCounts`), the second parameter line (upstream:
// `SmallVectorImpl<CanQualType> &ArgTys) {`), and the ArgTys.insert()
// continuation — confirm all three against upstream before building.
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type global void **).
  // Only base-object structors of classes with virtual bases take one.
  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
                                             : GD.getDtorType() == Dtor_Base) &&
      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
    // Honor the target's global address space when forming the pointee type.
    LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
    QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
    ArgTys.insert(ArgTys.begin() + 1,
    return AddedStructorArgCounts::prefix(1);
  }
  return AddedStructorArgCounts{};
}
1846
1847void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1848 // The destructor used for destructing this as a base class; ignores
1849 // virtual bases.
1850 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1851
1852 // The destructor used for destructing this as a most-derived class;
1853 // call the base destructor and then destructs any virtual bases.
1854 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1855
1856 // The destructor in a virtual table is always a 'deleting'
1857 // destructor, which calls the complete destructor and then uses the
1858 // appropriate operator delete.
1859 if (D->isVirtual())
1860 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1861}
1862
1863void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1864 QualType &ResTy,
1865 FunctionArgList &Params) {
1866 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1867 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1868
1869 // Check if we need a VTT parameter as well.
1870 if (NeedsVTTParameter(CGF.CurGD)) {
1871 ASTContext &Context = getContext();
1872
1873 // FIXME: avoid the fake decl
1874 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1875 QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
1876 QualType T = Context.getPointerType(Q);
1877 auto *VTTDecl = ImplicitParamDecl::Create(
1878 Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1879 T, ImplicitParamKind::CXXVTT);
1880 Params.insert(Params.begin() + 1, VTTDecl);
1881 getStructorImplicitParamDecl(CGF) = VTTDecl;
1882 }
1883}
1884
1885void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1886 // Naked functions have no prolog.
1887 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1888 return;
1889
1890 /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1891 /// adjustments are required, because they are all handled by thunks.
1892 setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1893
1894 /// Initialize the 'vtt' slot if needed.
1895 if (getStructorImplicitParamDecl(CGF)) {
1896 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1897 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1898 }
1899
1900 /// If this is a function that the ABI specifies returns 'this', initialize
1901 /// the return slot to 'this' at the start of the function.
1902 ///
1903 /// Unlike the setting of return types, this is done within the ABI
1904 /// implementation instead of by clients of CGCXXABI because:
1905 /// 1) getThisValue is currently protected
1906 /// 2) in theory, an ABI could implement 'this' returns some other way;
1907 /// HasThisReturn only specifies a contract, not the implementation
1908 if (HasThisReturn(CGF.CurGD))
1909 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1910}
1911
// Compute the implicit arguments (VTT, if any) to pass on a constructor call.
// NOTE(review): the first parameter line of this signature was dropped by the
// listing (upstream: `CodeGenFunction &CGF, const CXXConstructorDecl *D,
// CXXCtorType Type,`) — confirm against upstream before building.
CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
    bool ForVirtualBase, bool Delegating) {
  // Only base-object structors of classes with virtual bases take a VTT.
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument. Make sure to
  // correctly reflect its address space, which can differ from generic on
  // some targets.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
  QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS);
  QualType VTTTy = getContext().getPointerType(Q);
  return AddedStructorArgs::prefix({{VTT, VTTTy}});
}
1928
// The implicit parameter of an Itanium destructor call is the VTT (when one
// is needed for the given variant); may be null.
// NOTE(review): the first parameter line of this signature was dropped by the
// listing (upstream: `CodeGenFunction &CGF, const CXXDestructorDecl *DD,
// CXXDtorType Type,`) — confirm against upstream before building.
llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
    bool ForVirtualBase, bool Delegating) {
  GlobalDecl GD(DD, Type);
  return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
}
1935
// Emit a direct (non-virtual-dispatch) call to the given destructor variant,
// passing the VTT implicit argument when required.
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  // Fetch the VTT argument, if this destructor variant needs one.
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  // NOTE(review): this listing drops the declaration of 'Callee' and the
  // then-branch assignment (upstream builds an Apple kext virtual destructor
  // call there) — confirm against upstream before building.

  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
  else
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee, CGF.getAsNaturalPointerTo(This, ThisTy),
                            ThisTy, VTT, VTTTy, nullptr);
}
1956
// Check if any non-inline method has the specified attribute.
// NOTE(review): the function header line was dropped by this listing
// (upstream: `static bool CXXRecordNonInlineHasAttr(const CXXRecordDecl *RD)
// {`) — confirm against upstream before building.
template <typename T>
  for (const auto *D : RD->noload_decls()) {
    if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
      // Only out-of-line declarations without a body matter here: skip
      // inline functions, definitions, and pure virtuals.
      if (FD->isInlined() || FD->doesThisDeclarationHaveABody() ||
          FD->isPureVirtual())
        continue;
      if (D->hasAttr<T>())
        return true;
    }
  }

  return false;
}
1972
// Selectively apply DLL import/export storage to a vtable based on attributes
// on the class's non-inline methods (used on targets with PS4-style selective
// DLL import/export).
// NOTE(review): the first line of this signature was dropped by the listing
// (upstream: `static void setVTableSelectiveDLLImportExport(CodeGenModule
// &CGM,`) — confirm against upstream before building.
                                              llvm::GlobalVariable *VTable,
                                              const CXXRecordDecl *RD) {
  // Leave the vtable alone if a storage class is already set or the class
  // itself is explicitly dllimport/dllexport.
  if (VTable->getDLLStorageClass() !=
          llvm::GlobalVariable::DefaultStorageClass ||
      RD->hasAttr<DLLImportAttr>() || RD->hasAttr<DLLExportAttr>())
    return;

  // External vtables may need importing; locally-emitted ones may need
  // exporting, driven by method attributes.
  if (CGM.getVTables().isVTableExternal(RD)) {
    if (CXXRecordNonInlineHasAttr<DLLImportAttr>(RD))
      VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
  } else if (CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
    VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
}
1987
// Emit the full vtable group definition for RD: build the initializer, set
// linkage/comdat/visibility, and attach type metadata for CFI/WPD.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  // Already defined.
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // PS4/PS5 use selective, attribute-driven DLL import/export for vtables.
  if (CGM.getTarget().hasPS4DLLImportExport())
    setVTableSelectiveDLLImportExport(CGM, VTable, RD);

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  // NOTE(review): the final condition line of this 'if' was dropped by the
  // listing (upstream additionally checks that the __cxxabiv1 namespace is at
  // translation-unit scope) — confirm against upstream before building.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  // Relative vtables: strip hwasan tagging, and emit a dso_local alias for
  // non-dso_local vtables so relative references can resolve.
  if (VTContext.isRelativeLayout()) {
    CGVT.RemoveHwasanMetadata(VTable);
    if (!VTable->isDSOLocal())
      CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
  }
}
2053
2054bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
2055 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
2056 if (Vptr.NearestVBase == nullptr)
2057 return false;
2058 return NeedsVTTParameter(CGF.CurGD);
2059}
2060
2061llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
2062 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
2063 const CXXRecordDecl *NearestVBase) {
2064
2065 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
2066 NeedsVTTParameter(CGF.CurGD)) {
2067 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
2068 NearestVBase);
2069 }
2070 return getVTableAddressPoint(Base, VTableClass);
2071}
2072
// Build a constant GEP to the address point of Base's vtable within
// VTableClass's vtable group, annotated with an inrange so only that vtable
// can be accessed through the result.
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  const VTableLayout &Layout =
      CGM.getItaniumVTableContext().getVTableLayout(VTableClass);
  // NOTE(review): the declaration line for 'AddressPoint' was dropped by this
  // listing (upstream: `VTableLayout::AddressPointLocation AddressPoint =`)
  // — confirm against upstream before building.
      Layout.getAddressPoint(Base);
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, 0),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // Add inrange attribute to indicate that only the VTableIndex can be
  // accessed.
  unsigned ComponentSize =
      CGM.getDataLayout().getTypeAllocSize(CGM.getVTableComponentType());
  unsigned VTableSize =
      ComponentSize * Layout.getVTableSize(AddressPoint.VTableIndex);
  unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
  llvm::ConstantRange InRange(llvm::APInt(32, -Offset, true),
                              llvm::APInt(32, VTableSize - Offset, true));
  return llvm::ConstantExpr::getGetElementPtr(
      VTable->getValueType(), VTable, Indices, /*InBounds=*/true, InRange);
}
2102
// Fetch a secondary vtable address point out of the VTT while inside a
// base-object constructor/destructor of a class with virtual bases.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy, VTT,
                                                 VirtualPointerIndex);

  // And load the address point from the VTT.
  // NOTE(review): the load's first line was dropped by this listing
  // (upstream: `CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,`)
  // — confirm against upstream before building.
  llvm::Value *AP =
                                    CGF.getPointerAlign());

  // Under pointer authentication, the VTT entry is signed: authenticate it.
  if (auto &Schema = CGF.CGM.getCodeGenOpts().PointerAuth.CXXVTTVTablePointers) {
    CGPointerAuthInfo PointerAuth = CGF.EmitPointerAuthInfo(Schema, VTT,
                                                            GlobalDecl(),
                                                            QualType());
    AP = CGF.EmitPointerAuthAuth(PointerAuth, AP);
  }

  return AP;
}
2133
2134llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
2135 CharUnits VPtrOffset) {
2136 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
2137
2138 llvm::GlobalVariable *&VTable = VTables[RD];
2139 if (VTable)
2140 return VTable;
2141
2142 // Queue up this vtable for possible deferred emission.
2143 CGM.addDeferredVTable(RD);
2144
2145 SmallString<256> Name;
2146 llvm::raw_svector_ostream Out(Name);
2147 getMangleContext().mangleCXXVTable(RD, Out);
2148
2149 const VTableLayout &VTLayout =
2150 CGM.getItaniumVTableContext().getVTableLayout(RD);
2151 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
2152
2153 // Use pointer to global alignment for the vtable. Otherwise we would align
2154 // them based on the size of the initializer which doesn't make sense as only
2155 // single values are read.
2156 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
2157 unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
2158 ? 32
2159 : CGM.getTarget().getPointerAlign(AS);
2160
2161 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
2162 Name, VTableType, llvm::GlobalValue::ExternalLinkage,
2163 getContext().toCharUnitsFromBits(PAlign).getAsAlign());
2164 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2165
2166 if (CGM.getTarget().hasPS4DLLImportExport())
2167 setVTableSelectiveDLLImportExport(CGM, VTable, RD);
2168
2169 CGM.setGVProperties(VTable, RD);
2170 return VTable;
2171}
2172
// Load the virtual function pointer for GD from the object's vtable,
// applying CFI checked loads, relative-vtable loads, or pointer
// authentication as configured.
// NOTE(review): the trailing `SourceLocation Loc) {` line of this signature
// was dropped by the listing — confirm against upstream before building.
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
  llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc, *VTableSlotPtr = nullptr;
  auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers;
  if (!Schema && CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: the checked load validates the vtable pointer and fetches the
    // slot in one step. Slot offset is VTableIndex * pointer-width bytes.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable, PtrTy,
        VTableIndex *
            CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
            8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative vtables store 32-bit offsets; llvm.load.relative resolves
      // them against the vtable address.
      VFuncLoad = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
    } else {
      VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          PtrTy, VTable, VTableIndex, "vfn");
      VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
                                                CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    // NOTE(review): the MDNode::get call's final argument line was dropped by
    // this listing (upstream passes an empty metadata list) — confirm.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
      }
    }
    VFunc = VFuncLoad;
  }

  // Under pointer auth, the slot holds a signed pointer; record the auth
  // info (keyed by the original, most-base method) on the callee.
  CGPointerAuthInfo PointerAuth;
  if (Schema) {
    assert(VTableSlotPtr && "virtual function pointer not set");
    GD = CGM.getItaniumVTableContext().findOriginalMethod(GD.getCanonicalDecl());
    PointerAuth = CGF.EmitPointerAuthInfo(Schema, VTableSlotPtr, GD, QualType());
  }
  CGCallee Callee(GD, VFunc, PointerAuth);
  return Callee;
}
2233
2234llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
2235 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
2236 Address This, DeleteOrMemberCallExpr E) {
2237 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2238 auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2239 assert((CE != nullptr) ^ (D != nullptr));
2240 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2241 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2242
2243 GlobalDecl GD(Dtor, DtorType);
2244 const CGFunctionInfo *FInfo =
2245 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2246 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2247 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2248
2249 QualType ThisTy;
2250 if (CE) {
2251 ThisTy = CE->getObjectType();
2252 } else {
2253 ThisTy = D->getDestroyedType();
2254 }
2255
2256 CGF.EmitCXXDestructorCall(GD, Callee, This.emitRawPointer(CGF), ThisTy,
2257 nullptr, QualType(), nullptr);
2258 return nullptr;
2259}
2260
2261void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2262 CodeGenVTables &VTables = CGM.getVTables();
2263 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2264 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2265}
2266
2267bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2268 const CXXRecordDecl *RD) const {
2269 // We don't emit available_externally vtables if we are in -fapple-kext mode
2270 // because kext mode does not permit devirtualization.
2271 if (CGM.getLangOpts().AppleKext)
2272 return false;
2273
2274 // If the vtable is hidden then it is not safe to emit an available_externally
2275 // copy of vtable.
2276 if (isVTableHidden(RD))
2277 return false;
2278
2279 if (CGM.getCodeGenOpts().ForceEmitVTables)
2280 return true;
2281
2282 // If we don't have any not emitted inline virtual function then we are safe
2283 // to emit an available_externally copy of vtable.
2284 // FIXME we can still emit a copy of the vtable if we
2285 // can emit definition of the inline functions.
2286 if (hasAnyUnusedVirtualInlineFunction(RD))
2287 return false;
2288
2289 // For a class with virtual bases, we must also be able to speculatively
2290 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2291 // the vtable" and "can emit the VTT". For a base subobject, this means we
2292 // need to be able to emit non-virtual base vtables.
2293 if (RD->getNumVBases()) {
2294 for (const auto &B : RD->bases()) {
2295 auto *BRD = B.getType()->getAsCXXRecordDecl();
2296 assert(BRD && "no class for base specifier");
2297 if (B.isVirtual() || !BRD->isDynamicClass())
2298 continue;
2299 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2300 return false;
2301 }
2302 }
2303
2304 return true;
2305}
2306
// Decide whether RD's complete-object vtable (and VTT) may be emitted
// speculatively as available_externally in this TU.
bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
    return false;

  // NOTE(review): the condition guarding this 'return false' was dropped by
  // the listing (upstream bails out when the vtable's definition lives in an
  // external source/module) — confirm against upstream before building.
    return false;

  // For a complete-object vtable (or more specifically, for the VTT), we need
  // to be able to speculatively emit the vtables of all dynamic virtual bases.
  for (const auto &B : RD->vbases()) {
    auto *BRD = B.getType()->getAsCXXRecordDecl();
    assert(BRD && "no class for base specifier");
    if (!BRD->isDynamicClass())
      continue;
    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
      return false;
  }

  return true;
}
// Apply the non-virtual and virtual components of a this/return pointer
// adjustment, as used by thunks. For this-adjustments the non-virtual part
// is applied before the virtual part; for return-adjustments, after.
// NOTE(review): the first line of this signature was dropped by the listing
// (upstream: `static llvm::Value *performTypeAdjustment(CodeGenFunction
// &CGF,`) — confirm against upstream before building.
                                          Address InitialPtr,
                                          const CXXRecordDecl *UnadjustedClass,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // Nothing to do: hand back the pointer unchanged.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.emitRawPointer(CGF);

  Address V = InitialPtr.withElementType(CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  // NOTE(review): the first line of the byte-GEP call was dropped by the
  // listing (upstream: `V = CGF.Builder.CreateConstInBoundsByteGEP(V,`).
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
                                         CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    llvm::Value *VTablePtr =
        CGF.GetVTablePtr(V, CGF.Int8PtrTy, UnadjustedClass);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
    // NOTE(review): the relative-layout branch header and two continuation
    // lines were dropped by the listing (upstream tests `isRelativeLayout()`
    // and loads a 32-bit offset at 4-byte alignment; the classic branch
    // initializes PtrDiffTy from the pointer-diff type) — confirm.
      // Load the adjustment offset from the vtable as a 32-bit int.
      Offset =
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
    } else {
      llvm::Type *PtrDiffTy =

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getElementType(),
                                              V.emitRawPointer(CGF), Offset);
  } else {
    ResultPtr = V.emitRawPointer(CGF);
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  return ResultPtr;
}
2382
// Thunk entry: adjust the incoming 'this' pointer per the thunk's ThunkInfo.
// NOTE(review): the virtual-adjustment argument line was dropped by the
// listing (upstream passes `TI.This.Virtual.Itanium.VCallOffsetOffset,`)
// — confirm against upstream before building.
llvm::Value *
ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF, Address This,
                                     const CXXRecordDecl *UnadjustedClass,
                                     const ThunkInfo &TI) {
  return performTypeAdjustment(CGF, This, UnadjustedClass, TI.This.NonVirtual,
                               /*IsReturnAdjustment=*/false);
}
2391
// Thunk exit: adjust the covariant return value per the return adjustment.
// NOTE(review): the virtual-adjustment argument line was dropped by the
// listing (upstream passes `RA.Virtual.Itanium.VBaseOffsetOffset,`)
// — confirm against upstream before building.
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const CXXRecordDecl *UnadjustedClass,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, UnadjustedClass, RA.NonVirtual,
                               /*IsReturnAdjustment=*/true);
}
2400
2401void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2402 RValue RV, QualType ResultType) {
2403 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2404 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2405
2406 // Destructor thunks in the ARM ABI have indeterminate results.
2407 llvm::Type *T = CGF.ReturnValue.getElementType();
2408 RValue Undef = RValue::get(llvm::UndefValue::get(T));
2409 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2410}
2411
2412/************************** Array allocation cookies **************************/
2413
2414CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2415 // The array cookie is a size_t; pad that up to the element alignment.
2416 // The cookie is actually right-justified in that space.
2417 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2418 CGM.getContext().getPreferredTypeAlignInChars(elementType));
2419}
2420
2421Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2422 Address NewPtr,
2423 llvm::Value *NumElements,
2424 const CXXNewExpr *expr,
2425 QualType ElementType) {
2426 assert(requiresArrayCookie(expr));
2427
2428 unsigned AS = NewPtr.getAddressSpace();
2429
2430 ASTContext &Ctx = getContext();
2431 CharUnits SizeSize = CGF.getSizeSize();
2432
2433 // The size of the cookie.
2434 CharUnits CookieSize =
2435 std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
2436 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2437
2438 // Compute an offset to the cookie.
2439 Address CookiePtr = NewPtr;
2440 CharUnits CookieOffset = CookieSize - SizeSize;
2441 if (!CookieOffset.isZero())
2442 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2443
2444 // Write the number of elements into the appropriate slot.
2445 Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy);
2446 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2447
2448 // Handle the array cookie specially in ASan.
2449 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2450 (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2451 CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2452 // The store to the CookiePtr does not need to be instrumented.
2453 SI->setNoSanitizeMetadata();
2454 llvm::FunctionType *FTy =
2455 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2456 llvm::FunctionCallee F =
2457 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2458 CGF.Builder.CreateCall(F, NumElementsPtr.emitRawPointer(CGF));
2459 }
2460
2461 // Finally, compute a pointer to the actual data buffer by skipping
2462 // over the cookie completely.
2463 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2464}
2465
2466llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2467 Address allocPtr,
2468 CharUnits cookieSize) {
2469 // The element size is right-justified in the cookie.
2470 Address numElementsPtr = allocPtr;
2471 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2472 if (!numElementsOffset.isZero())
2473 numElementsPtr =
2474 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2475
2476 unsigned AS = allocPtr.getAddressSpace();
2477 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
2478 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2479 return CGF.Builder.CreateLoad(numElementsPtr);
2480 // In asan mode emit a function call instead of a regular load and let the
2481 // run-time deal with it: if the shadow is properly poisoned return the
2482 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2483 // We can't simply ignore this load using nosanitize metadata because
2484 // the metadata may be lost.
2485 llvm::FunctionType *FTy =
2486 llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
2487 llvm::FunctionCallee F =
2488 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2489 return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
2490}
2491
2492CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2493 // ARM says that the cookie is always:
2494 // struct array_cookie {
2495 // std::size_t element_size; // element_size != 0
2496 // std::size_t element_count;
2497 // };
2498 // But the base ABI doesn't give anything an alignment greater than
2499 // 8, so we can dismiss this as typical ABI-author blindness to
2500 // actual language complexity and round up to the element alignment.
2501 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2502 CGM.getContext().getTypeAlignInChars(elementType));
2503}
2504
2505Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2506 Address newPtr,
2507 llvm::Value *numElements,
2508 const CXXNewExpr *expr,
2509 QualType elementType) {
2510 assert(requiresArrayCookie(expr));
2511
2512 // The cookie is always at the start of the buffer.
2513 Address cookie = newPtr;
2514
2515 // The first element is the element size.
2516 cookie = cookie.withElementType(CGF.SizeTy);
2517 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2518 getContext().getTypeSizeInChars(elementType).getQuantity());
2519 CGF.Builder.CreateStore(elementSize, cookie);
2520
2521 // The second element is the element count.
2522 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2523 CGF.Builder.CreateStore(numElements, cookie);
2524
2525 // Finally, compute a pointer to the actual data buffer by skipping
2526 // over the cookie completely.
2527 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2528 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2529}
2530
2531llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2532 Address allocPtr,
2533 CharUnits cookieSize) {
2534 // The number of elements is at offset sizeof(size_t) relative to
2535 // the allocated pointer.
2536 Address numElementsPtr
2537 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2538
2539 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
2540 return CGF.Builder.CreateLoad(numElementsPtr);
2541}
2542
2543/*********************** Static local initialization **************************/
2544
2545static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2546 llvm::PointerType *GuardPtrTy) {
2547 // int __cxa_guard_acquire(__guard *guard_object);
2548 llvm::FunctionType *FTy =
2549 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2550 GuardPtrTy, /*isVarArg=*/false);
2551 return CGM.CreateRuntimeFunction(
2552 FTy, "__cxa_guard_acquire",
2553 llvm::AttributeList::get(CGM.getLLVMContext(),
2554 llvm::AttributeList::FunctionIndex,
2555 llvm::Attribute::NoUnwind));
2556}
2557
2558static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2559 llvm::PointerType *GuardPtrTy) {
2560 // void __cxa_guard_release(__guard *guard_object);
2561 llvm::FunctionType *FTy =
2562 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2563 return CGM.CreateRuntimeFunction(
2564 FTy, "__cxa_guard_release",
2565 llvm::AttributeList::get(CGM.getLLVMContext(),
2566 llvm::AttributeList::FunctionIndex,
2567 llvm::Attribute::NoUnwind));
2568}
2569
2570static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2571 llvm::PointerType *GuardPtrTy) {
2572 // void __cxa_guard_abort(__guard *guard_object);
2573 llvm::FunctionType *FTy =
2574 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2575 return CGM.CreateRuntimeFunction(
2576 FTy, "__cxa_guard_abort",
2577 llvm::AttributeList::get(CGM.getLLVMContext(),
2578 llvm::AttributeList::FunctionIndex,
2579 llvm::Attribute::NoUnwind));
2580}
2581
2582namespace {
2583 struct CallGuardAbort final : EHScopeStack::Cleanup {
2584 llvm::GlobalVariable *Guard;
2585 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2586
2587 void Emit(CodeGenFunction &CGF, Flags flags) override {
2588 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2589 Guard);
2590 }
2591 };
2592}
2593
/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
///
/// Emits the guarded one-time initialization sequence for \p D: test the
/// guard variable, optionally call __cxa_guard_acquire/__cxa_guard_release
/// (with a __cxa_guard_abort cleanup on the exceptional path), run the
/// initializer, and mark the guard as initialized.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment =
          CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
    }
  }
  // Pointer type used for the __cxa_guard_* runtime calls below.
  llvm::PointerType *guardPtrTy = llvm::PointerType::get(
      CGF.CGM.getLLVMContext(),
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage, visibility and dll storage class from the guarded
    // variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    guard->setDLLStorageClass(var->getDLLStorageClass());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  // Typed, aligned view of the guard for the loads/stores below.
  Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }
  //
  // If threadsafe statics are enabled, but we don't have inline atomics, just
  // call __cxa_guard_acquire unconditionally. The "inline" check isn't
  // actually inline, and the user might not expect calls to __atomic libcalls.

  unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
  if (!threadsafe || MaxInlineWidthInBits) {
    // Load the first byte of the guard variable.
    llvm::LoadInst *LI =
        Builder.CreateLoad(guardAddr.withElementType(CGM.Int8Ty));

    // Itanium ABI:
    //   An implementation supporting thread-safety on multiprocessor
    //   systems must also guarantee that references to the initialized
    //   object do not occur before the load of the initialization flag.
    //
    // In LLVM, we do this by marking the load Acquire.
    if (threadsafe)
      LI->setAtomic(llvm::AtomicOrdering::Acquire);

    // For ARM, we should only check the first bit, rather than the entire byte:
    //
    // ARM C++ ABI 3.2.3.1:
    //   To support the potential use of initialization guard variables
    //   as semaphores that are the target of ARM SWP and LDREX/STREX
    //   synchronizing instructions we define a static initialization
    //   guard variable to be a 4-byte aligned, 4-byte word with the
    //   following inline access protocol.
    //     #define INITIALIZED 1
    //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
    //       if (__cxa_guard_acquire(&obj_guard))
    //         ...
    //     }
    //
    // and similarly for ARM64:
    //
    // ARM64 C++ ABI 3.2.2:
    //   This ABI instead only specifies the value bit 0 of the static guard
    //   variable; all other bits are platform defined. Bit 0 shall be 0 when
    //   the variable is not initialized and 1 when it is.
    llvm::Value *V =
        (UseARMGuardVarABI && !useInt8GuardVariable)
            ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
            : LI;
    llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

    llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");

    // Check if the first byte of the guard variable is zero.
    CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                                 CodeGenFunction::GuardKind::VariableGuard, &D);

    CGF.EmitBlock(InitCheckBlock);
  }

  // The semantics of dynamic initialization of variables with static or thread
  // storage duration depends on whether they are declared at block-scope. The
  // initialization of such variables at block-scope can be aborted with an
  // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
  // to their initialization has undefined behavior (also per C++20
  // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
  // lead to termination (per C++20 [except.terminate]p1), and recursive
  // references to the variables are governed only by the lifetime rules (per
  // C++20 [class.cdtor]p2), which means such references are perfectly fine as
  // long as they avoid touching memory. As a result, block-scope variables must
  // not be marked as initialized until after initialization completes (unless
  // the mark is reverted following an exception), but non-block-scope variables
  // must be marked prior to initialization so that recursive accesses during
  // initialization do not restart initialization.

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    // Only initialize if __cxa_guard_acquire returned non-zero.
    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  } else if (!D.isLocalVarDecl()) {
    // For non-local variables, store 1 into the first byte of the guard
    // variable before the object initialization begins so that references
    // to the variable during initialization don't restart initialization.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        guardAddr.withElementType(CGM.Int8Ty));
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release. This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.emitRawPointer(CGF));
  } else if (D.isLocalVarDecl()) {
    // For local variables, store 1 into the first byte of the guard variable
    // after the object initialization completes so that initialization is
    // retried if initialization is interrupted by an exception.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        guardAddr.withElementType(CGM.Int8Ty));
  }

  CGF.EmitBlock(EndBlock);
}
2812
/// Register a global destructor using __cxa_atexit.
                                         llvm::FunctionCallee dtor,
                                         llvm::Constant *addr, bool TLS) {
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  // Select the runtime entry point: __cxa_atexit for ordinary statics;
  // for TLS, _tlv_atexit on Darwin and __cxa_thread_atexit elsewhere.
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.
  llvm::Type *dtorTy = CGF.UnqualPtrTy;

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrPtrTy = AddrAS ? llvm::PointerType::get(CGF.getLLVMContext(), AddrAS)
                          : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
    fn->setDoesNotThrow();

  // Build the void(void*) function type used to retype the destructor
  // pointer for the runtime call.
  const auto &Context = CGF.CGM.getContext();
                           /*IsVariadic=*/false, /*IsCXXMethod=*/false));
  QualType fnType =
      Context.getFunctionType(Context.VoidTy, {Context.VoidPtrTy}, EPI);
  llvm::Constant *dtorCallee = cast<llvm::Constant>(dtor.getCallee());
  dtorCallee = CGF.CGM.getFunctionPointer(dtorCallee, fnType);

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  // Emit: <atexit-fn>(dtor, addr, &__dso_handle). Cannot throw.
  llvm::Value *args[] = {dtorCallee, addr, handle};
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
2870
                                                   StringRef FnName) {
  // Create a function that registers/unregisters destructors that have the same
  // priority.
  // The helper takes no arguments and returns void.
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());

  return GlobalInitOrCleanupFn;
}
2881
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  // For each priority level, build a __GLOBAL_cleanup_N function that
  // unregisters each destructor and, when unregistration reports 0, runs
  // the destructor directly.
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    // Walk in reverse registration order.
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    // Run the cleanup function as a global destructor at this priority.
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}
2937
void CodeGenModule::registerGlobalDtorsWithAtExit() {
  // For each priority level, build a __GLOBAL_init_N constructor function
  // that registers the collected destructors at runtime.
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      } else {
        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority);
  }

  // Targets using sinit/sterm additionally emit the matching per-priority
  // cleanup functions.
  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}
2977
2978/// Register a global destructor as best as we know how.
2979void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2980 llvm::FunctionCallee dtor,
2981 llvm::Constant *addr) {
2982 if (D.isNoDestroy(CGM.getContext()))
2983 return;
2984
2985 // OpenMP offloading supports C++ constructors and destructors but we do not
2986 // always have 'atexit' available. Instead lower these to use the LLVM global
2987 // destructors which we can handle directly in the runtime. Note that this is
2988 // not strictly 1-to-1 with using `atexit` because we no longer tear down
2989 // globals in reverse order of when they were constructed.
2990 if (!CGM.getLangOpts().hasAtExit() && !D.isStaticLocal())
2991 return CGF.registerGlobalDtorWithLLVM(D, dtor, addr);
2992
2993 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2994 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2995 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2996 // We can always use __cxa_thread_atexit.
2997 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2998 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2999
3000 // In Apple kexts, we want to add a global destructor entry.
3001 // FIXME: shouldn't this be guarded by some variable?
3002 if (CGM.getLangOpts().AppleKext) {
3003 // Generate a global destructor entry.
3004 return CGM.AddCXXDtorEntry(dtor, addr);
3005 }
3006
3007 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
3008}
3009
  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
  // Darwin prefers to have references to thread local variables to go through
  // the thread wrapper instead of directly referencing the backing variable.
  // Only dynamically-initialized TLS on Darwin takes this path.
  return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
         CGM.getTarget().getTriple().isOSDarwin();
}
3018
/// Get the appropriate linkage for the wrapper function. This is essentially
/// the weak form of the variable's linkage; every translation unit which needs
/// the wrapper emits a copy, and we want the linker to merge them.
static llvm::GlobalValue::LinkageTypes
  llvm::GlobalValue::LinkageTypes VarLinkage =

  // For internal linkage variables, we don't need an external or weak wrapper.
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
    return VarLinkage;

  // If the thread wrapper is replaceable, give it appropriate linkage.
  if (isThreadWrapperReplaceable(VD, CGM))
    if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
        !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
      return VarLinkage;
  // Otherwise use weak_odr so identical wrapper copies merge at link time.
  return llvm::GlobalValue::WeakODRLinkage;
}
3038
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper returns a pointer to the variable; for a reference-typed
  // variable, a pointer to the referenced type.
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  // Remember the wrapper so EmitThreadLocalInitFuncs can emit its body later.
  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}
3087
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
        CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  // Ordered initializers all run from a single guarded __tls_init function.
  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     /*TLS=*/true);
    // __tls_guard is a one-byte internal TLS flag recording whether
    // __tls_init has already run on this thread.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper. This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols. However,
    // other TUs will not know whether the initialization routine exists
    // so create an empty, init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, true) &&
        !mayNeedDestruction(VD)) {
      // Init should be null. If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
          cast<llvm::Function>(Func),
          /*IsThunk=*/false);
      // Create a function body that just returns
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    // Now emit the wrapper body itself: run the initializer (if needed),
    // then return the variable's address.
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, except if constinit and also neither of class type or of
      // (possibly multi-dimensional) array of class type, thread_local vars
      // will have init routines regardless of whether they are
      // const-initialized. Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existance. This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(InitFnTy, Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Builder.CreateThreadLocalAddress(Var);

    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align);
    }

    Builder.CreateRet(Val);
  }
}
3290
3291LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3292 const VarDecl *VD,
3293 QualType LValType) {
3294 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3295 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3296
3297 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3298 CallVal->setCallingConv(Wrapper->getCallingConv());
3299
3300 LValue LV;
3301 if (VD->getType()->isReferenceType())
3302 LV = CGF.MakeNaturalAlignRawAddrLValue(CallVal, LValType);
3303 else
3304 LV = CGF.MakeRawAddrLValue(CallVal, LValType,
3305 CGF.getContext().getDeclAlign(VD));
3306 // FIXME: need setObjCGCLValueClass?
3307 return LV;
3308}
3309
3310/// Return whether the given global decl needs a VTT parameter, which it does
3311/// if it's a base constructor or destructor with virtual bases.
3312bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3313 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3314
3315 // We don't have any virtual bases, just return early.
3316 if (!MD->getParent()->getNumVBases())
3317 return false;
3318
3319 // Check if we have a base constructor.
3320 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3321 return true;
3322
3323 // Check if we have a base destructor.
3324 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3325 return true;
3326
3327 return false;
3328}
3329
// Create (or return a cached copy of) a "_vfpthunk_" wrapper for MD: a
// function that forwards all of its arguments to a virtual call of MD via a
// musttail call. NOTE(review): this doc-extract drops a few upstream source
// lines in this function (after SetLLVMFunctionAttributes, and around the
// 'Required'/'Callee' declarations below) — confirm against upstream.
3330llvm::Constant *
3331ItaniumCXXABI::getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD) {
// The thunk symbol is the mangled method name with "_vfpthunk_" appended.
3332 SmallString<256> MethodName;
3333 llvm::raw_svector_ostream Out(MethodName);
3334 getMangleContext().mangleCXXName(MD, Out);
3335 MethodName += "_vfpthunk_";
3336 StringRef ThunkName = MethodName.str();
3337 llvm::Function *ThunkFn;
// Reuse a previously created thunk with the same name, if one exists.
3338 if ((ThunkFn = cast_or_null<llvm::Function>(
3339 CGM.getModule().getNamedValue(ThunkName))))
3340 return ThunkFn;
3341
3342 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeCXXMethodDeclaration(MD);
3343 llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
// Externally visible methods get linkonce_odr + hidden visibility so the
// thunk can be deduplicated across TUs; otherwise it is internal.
3344 llvm::GlobalValue::LinkageTypes Linkage =
3345 MD->isExternallyVisible() ? llvm::GlobalValue::LinkOnceODRLinkage
3346 : llvm::GlobalValue::InternalLinkage;
3347 ThunkFn =
3348 llvm::Function::Create(ThunkTy, Linkage, ThunkName, &CGM.getModule());
3349 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3350 ThunkFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
3351 assert(ThunkFn->getName() == ThunkName && "name was uniqued!");
3352
3353 CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn, /*IsThunk=*/true);
3355
3356 // Stack protection sometimes gets inserted after the musttail call.
3357 ThunkFn->removeFnAttr(llvm::Attribute::StackProtect);
3358 ThunkFn->removeFnAttr(llvm::Attribute::StackProtectStrong);
3359 ThunkFn->removeFnAttr(llvm::Attribute::StackProtectReq);
3360
3361 // Start codegen.
3362 CodeGenFunction CGF(CGM);
3363 CGF.CurGD = GlobalDecl(MD);
3364 CGF.CurFuncIsThunk = true;
3365
3366 // Build FunctionArgs.
3367 FunctionArgList FunctionArgs;
3368 CGF.BuildFunctionArgList(CGF.CurGD, FunctionArgs);
3369
3370 CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
3371 FunctionArgs, MD->getLocation(), SourceLocation());
3372 llvm::Value *ThisVal = loadIncomingCXXThis(CGF);
3373 setCXXABIThisValue(CGF, ThisVal);
3374
// Forward every incoming parameter to the virtual call unchanged.
3375 CallArgList CallArgs;
3376 for (const VarDecl *VD : FunctionArgs)
3377 CGF.EmitDelegateCallArg(CallArgs, VD, SourceLocation());
3378
3379 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
// NOTE(review): declarations of 'Required' and 'Callee' are missing from
// this extract (upstream lines 3380/3383).
3381 const CGFunctionInfo &CallInfo =
3382 CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT, Required, 0);
3384 getThisAddress(CGF), ThunkTy);
3385 llvm::CallBase *CallOrInvoke;
// The forwarded call must be a musttail call so the thunk perfectly
// tail-forwards (including for return values it never inspects).
3386 CGF.EmitCall(CallInfo, Callee, ReturnValueSlot(), CallArgs, &CallOrInvoke,
3387 /*IsMustTail=*/true, SourceLocation(), true);
3388 auto *Call = cast<llvm::CallInst>(CallOrInvoke);
3389 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
3390 if (Call->getType()->isVoidTy())
3391 CGF.Builder.CreateRetVoid();
3392 else
3393 CGF.Builder.CreateRet(Call);
3394
3395 // Finish the function to maintain CodeGenFunction invariants.
3396 // FIXME: Don't emit unreachable code.
3397 CGF.EmitBlock(CGF.createBasicBlock());
3398 CGF.FinishFunction();
3399 return ThunkFn;
3400}
3401
3402namespace {
// Helper that assembles the constant initializer (vtable pointer, name
// pointer, and kind-specific trailing fields) for an Itanium-ABI
// std::type_info object, then emits it as a global variable.
3403class ItaniumRTTIBuilder {
3404 CodeGenModule &CGM; // Per-module state.
3405 llvm::LLVMContext &VMContext;
3406 const ItaniumCXXABI &CXXABI; // Per-module state.
3407
3408 /// Fields - The fields of the RTTI descriptor currently being built.
// NOTE(review): the declaration of the 'Fields' vector itself (upstream
// line 3409) is missing from this extract.
3410
3411 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3412 llvm::GlobalVariable *
3413 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
3414
3415 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3416 /// descriptor of the given type.
3417 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
3418
3419 /// BuildVTablePointer - Build the vtable pointer for the given type.
3420 void BuildVTablePointer(const Type *Ty);
3421
3422 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3423 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3424 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
3425
3426 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3427 /// classes with bases that do not satisfy the abi::__si_class_type_info
3428 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3429 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3430
3431 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3432 /// for pointer types.
3433 void BuildPointerTypeInfo(QualType PointeeTy);
3434
3435 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3436 /// type_info for an object type.
3437 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
3438
3439 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3440 /// struct, used for member pointer types.
3441 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
3442
3443public:
3444 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
3445 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
3446
3447 // Pointer type info flags.
3448 enum {
3449 /// PTI_Const - Type has const qualifier.
3450 PTI_Const = 0x1,
3451
3452 /// PTI_Volatile - Type has volatile qualifier.
3453 PTI_Volatile = 0x2,
3454
3455 /// PTI_Restrict - Type has restrict qualifier.
3456 PTI_Restrict = 0x4,
3457
3458 /// PTI_Incomplete - Type is incomplete.
3459 PTI_Incomplete = 0x8,
3460
3461 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
3462 /// (in pointer to member).
3463 PTI_ContainingClassIncomplete = 0x10,
3464
3465 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
3466 //PTI_TransactionSafe = 0x20,
3467
3468 /// PTI_Noexcept - Pointee is noexcept function (C++1z).
3469 PTI_Noexcept = 0x40,
3470 };
3471
3472 // VMI type info flags.
3473 enum {
3474 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
3475 VMI_NonDiamondRepeat = 0x1,
3476
3477 /// VMI_DiamondShaped - Class is diamond shaped.
3478 VMI_DiamondShaped = 0x2
3479 };
3480
3481 // Base class type info flags.
3482 enum {
3483 /// BCTI_Virtual - Base class is virtual.
3484 BCTI_Virtual = 0x1,
3485
3486 /// BCTI_Public - Base class is public.
3487 BCTI_Public = 0x2
3488 };
3489
3490 /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
3491 /// link to an existing RTTI descriptor if one already exists.
3492 llvm::Constant *BuildTypeInfo(QualType Ty);
3493
3494 /// BuildTypeInfo - Build the RTTI type info struct for the given type,
3495 /// with explicitly chosen linkage, visibility, and DLL storage class.
3495 llvm::Constant *BuildTypeInfo(
3496 QualType Ty,
3497 llvm::GlobalVariable::LinkageTypes Linkage,
3498 llvm::GlobalValue::VisibilityTypes Visibility,
3499 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
3500};
3501}
3502
// Emit the _ZTS global holding the mangled name string for Ty and return it.
// NOTE(review): the call that actually mangles the RTTI name into 'Name'
// (upstream line 3507, presumably mangleCXXRTTIName) is missing from this
// extract — confirm against upstream.
3503llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3504 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3505 SmallString<256> Name;
3506 llvm::raw_svector_ostream Out(Name);
3508
3509 // We know that the mangled name of the type starts at index 4 of the
3510 // mangled name of the typename, so we can just index into it in order to
3511 // get the mangled name of the type.
3512 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3513 Name.substr(4));
3514 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3515
3516 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3517 Name, Init->getType(), Linkage, Align.getAsAlign());
3518
// Replace any preexisting declaration's (lack of) initializer.
3519 GV->setInitializer(Init);
3520
3521 return GV;
3522}
3523
3524llvm::Constant *
3525ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3526 // Mangle the RTTI name.
3527 SmallString<256> Name;
3528 llvm::raw_svector_ostream Out(Name);
3529 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3530
3531 // Look for an existing global.
3532 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3533
3534 if (!GV) {
3535 // Create a new global variable.
3536 // Note for the future: If we would ever like to do deferred emission of
3537 // RTTI, check if emitting vtables opportunistically need any adjustment.
3538
3539 GV = new llvm::GlobalVariable(
3540 CGM.getModule(), CGM.GlobalsInt8PtrTy,
3541 /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
3542 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3543 CGM.setGVProperties(GV, RD);
3544 // Import the typeinfo symbol when all non-inline virtual methods are
3545 // imported.
3546 if (CGM.getTarget().hasPS4DLLImportExport()) {
3547 if (RD && CXXRecordNonInlineHasAttr<DLLImportAttr>(RD)) {
3548 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3549 CGM.setDSOLocal(GV);
3550 }
3551 }
3552 }
3553
3554 return GV;
3555}
3556
3557/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3558/// info for that type is defined in the standard library.
// NOTE(review): the signature line (upstream 3559, presumably
// "static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {") is
// missing from this extract.
3560 // Itanium C++ ABI 2.9.2:
3561 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
3562 // the run-time support library. Specifically, the run-time support
3563 // library should contain type_info objects for the types X, X* and
3564 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3565 // unsigned char, signed char, short, unsigned short, int, unsigned int,
3566 // long, unsigned long, long long, unsigned long long, float, double,
3567 // long double, char16_t, char32_t, and the IEEE 754r decimal and
3568 // half-precision floating point types.
3569 //
3570 // GCC also emits RTTI for __int128.
3571 // FIXME: We do not emit RTTI information for decimal types here.
3572
3573 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3574 switch (Ty->getKind()) {
// Fundamental types whose type_info lives in the C++ runtime library.
3575 case BuiltinType::Void:
3576 case BuiltinType::NullPtr:
3577 case BuiltinType::Bool:
3578 case BuiltinType::WChar_S:
3579 case BuiltinType::WChar_U:
3580 case BuiltinType::Char_U:
3581 case BuiltinType::Char_S:
3582 case BuiltinType::UChar:
3583 case BuiltinType::SChar:
3584 case BuiltinType::Short:
3585 case BuiltinType::UShort:
3586 case BuiltinType::Int:
3587 case BuiltinType::UInt:
3588 case BuiltinType::Long:
3589 case BuiltinType::ULong:
3590 case BuiltinType::LongLong:
3591 case BuiltinType::ULongLong:
3592 case BuiltinType::Half:
3593 case BuiltinType::Float:
3594 case BuiltinType::Double:
3595 case BuiltinType::LongDouble:
3596 case BuiltinType::Float16:
3597 case BuiltinType::Float128:
3598 case BuiltinType::Ibm128:
3599 case BuiltinType::Char8:
3600 case BuiltinType::Char16:
3601 case BuiltinType::Char32:
3602 case BuiltinType::Int128:
3603 case BuiltinType::UInt128:
3604 return true;
3605
// Extension/target-specific builtin types: no runtime-provided type_info.
3606#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3607 case BuiltinType::Id:
3608#include "clang/Basic/OpenCLImageTypes.def"
3609#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3610 case BuiltinType::Id:
3611#include "clang/Basic/OpenCLExtensionTypes.def"
3612 case BuiltinType::OCLSampler:
3613 case BuiltinType::OCLEvent:
3614 case BuiltinType::OCLClkEvent:
3615 case BuiltinType::OCLQueue:
3616 case BuiltinType::OCLReserveID:
3617#define SVE_TYPE(Name, Id, SingletonId) \
3618 case BuiltinType::Id:
3619#include "clang/Basic/AArch64SVEACLETypes.def"
3620#define PPC_VECTOR_TYPE(Name, Id, Size) \
3621 case BuiltinType::Id:
3622#include "clang/Basic/PPCTypes.def"
3623#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3624#include "clang/Basic/RISCVVTypes.def"
3625#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3626#include "clang/Basic/WebAssemblyReferenceTypes.def"
3627#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3628#include "clang/Basic/AMDGPUTypes.def"
3629#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3630#include "clang/Basic/HLSLIntangibleTypes.def"
3631 case BuiltinType::ShortAccum:
3632 case BuiltinType::Accum:
3633 case BuiltinType::LongAccum:
3634 case BuiltinType::UShortAccum:
3635 case BuiltinType::UAccum:
3636 case BuiltinType::ULongAccum:
3637 case BuiltinType::ShortFract:
3638 case BuiltinType::Fract:
3639 case BuiltinType::LongFract:
3640 case BuiltinType::UShortFract:
3641 case BuiltinType::UFract:
3642 case BuiltinType::ULongFract:
3643 case BuiltinType::SatShortAccum:
3644 case BuiltinType::SatAccum:
3645 case BuiltinType::SatLongAccum:
3646 case BuiltinType::SatUShortAccum:
3647 case BuiltinType::SatUAccum:
3648 case BuiltinType::SatULongAccum:
3649 case BuiltinType::SatShortFract:
3650 case BuiltinType::SatFract:
3651 case BuiltinType::SatLongFract:
3652 case BuiltinType::SatUShortFract:
3653 case BuiltinType::SatUFract:
3654 case BuiltinType::SatULongFract:
3655 case BuiltinType::BFloat16:
3656 return false;
3657
// Placeholder/dependent types should have been resolved before RTTI emission.
3658 case BuiltinType::Dependent:
3659#define BUILTIN_TYPE(Id, SingletonId)
3660#define PLACEHOLDER_TYPE(Id, SingletonId) \
3661 case BuiltinType::Id:
3662#include "clang/AST/BuiltinTypes.def"
3663 llvm_unreachable("asking for RRTI for a placeholder type!");
3664
3665 case BuiltinType::ObjCId:
3666 case BuiltinType::ObjCClass:
3667 case BuiltinType::ObjCSel:
3668 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3669 }
3670
3671 llvm_unreachable("Invalid BuiltinType Kind!");
3672}
3673
3674static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3675 QualType PointeeTy = PointerTy->getPointeeType();
3676 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3677 if (!BuiltinTy)
3678 return false;
3679
3680 // Check the qualifiers.
3681 Qualifiers Quals = PointeeTy.getQualifiers();
3682 Quals.removeConst();
3683
3684 if (!Quals.empty())
3685 return false;
3686
3687 return TypeInfoIsInStandardLibrary(BuiltinTy);
3688}
3689
3690/// IsStandardLibraryRTTIDescriptor - Returns whether the type
3691/// information for the given type exists in the standard library.
// NOTE(review): the signature line (upstream 3692, presumably
// "static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {") is missing
// from this extract.
3693 // Type info for builtin types is defined in the standard library.
3694 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3695 return TypeInfoIsInStandardLibrary(BuiltinTy);
3696
3697 // Type info for some pointer types to builtin types is defined in the
3698 // standard library.
3699 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3700 return TypeInfoIsInStandardLibrary(PointerTy);
3701
// All other types: not provided by the runtime.
3702 return false;
3703}
3704
3705/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3706/// the given type exists somewhere else, and that we should not emit the type
3707/// information in this translation unit. Assumes that it is not a
3708/// standard-library type.
// NOTE(review): the first signature line (upstream 3709, presumably
// "static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,") is
// missing from this extract; the second parameter line survives below.
3710 QualType Ty) {
3711 ASTContext &Context = CGM.getContext();
3712
3713 // If RTTI is disabled, assume it might be disabled in the
3714 // translation unit that defines any potential key function, too.
3715 if (!Context.getLangOpts().RTTI) return false;
3716
3717 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3718 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
// Without a definition (or without a vtable) the RTTI must be emitted here.
3719 if (!RD->hasDefinition())
3720 return false;
3721
3722 if (!RD->isDynamicClass())
3723 return false;
3724
3725 // FIXME: this may need to be reconsidered if the key function
3726 // changes.
3727 // N.B. We must always emit the RTTI data ourselves if there exists a key
3728 // function.
3729 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3730
3731 // Don't import the RTTI but emit it locally.
3732 if (CGM.getTriple().isWindowsGNUEnvironment())
3733 return false;
3734
// If the vtable is emitted elsewhere, the typeinfo usually is too; PS4
// DLL import/export and Windows-Itanium dllimport are special-cased.
3735 if (CGM.getVTables().isVTableExternal(RD)) {
3736 if (CGM.getTarget().hasPS4DLLImportExport())
3737 return true;
3738
3739 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3740 ? false
3741 : true;
3742 }
3743 if (IsDLLImport)
3744 return true;
3745 }
3746
3747 return false;
3748}
3749
3750/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3751static bool IsIncompleteClassType(const RecordType *RecordTy) {
3752 return !RecordTy->getDecl()->isCompleteDefinition();
3753}
3754
3755/// ContainsIncompleteClassType - Returns whether the given type contains an
3756/// incomplete class type. This is true if
3757///
3758/// * The given type is an incomplete class type.
3759/// * The given type is a pointer type whose pointee type contains an
3760/// incomplete class type.
3761/// * The given type is a member pointer type whose class is an incomplete
3762/// class type.
3763/// * The given type is a member pointer type whose pointee type contains an
3764/// incomplete class type.
3765/// is an indirect or direct pointer to an incomplete class type.
// NOTE(review): the signature line (upstream 3766, presumably
// "static bool ContainsIncompleteClassType(QualType Ty) {") is missing
// from this extract.
3767 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3768 if (IsIncompleteClassType(RecordTy))
3769 return true;
3770 }
3771
// Recurse through pointers to catch indirect pointers to incomplete types.
3772 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3773 return ContainsIncompleteClassType(PointerTy->getPointeeType());
3774
3775 if (const MemberPointerType *MemberPointerTy =
3776 dyn_cast<MemberPointerType>(Ty)) {
3777 // Check if the class type is incomplete.
3778 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3779 if (IsIncompleteClassType(ClassType))
3780 return true;
3781
3782 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3783 }
3784
3785 return false;
3786}
3787
3788// CanUseSingleInheritance - Return whether the given record decl has a "single,
3789// public, non-virtual base at offset zero (i.e. the derived class is dynamic
3790// iff the base is)", according to Itanium C++ ABI, 2.95p6b.
// NOTE(review): the signature line (upstream 3791, presumably
// "static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {") is
// missing from this extract.
3792 // Check the number of bases.
3793 if (RD->getNumBases() != 1)
3794 return false;
3795
3796 // Get the base.
// NOTE(review): the declaration of 'Base' (upstream 3797, presumably
// "CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();")
// is missing from this extract.
3798
3799 // Check that the base is not virtual.
3800 if (Base->isVirtual())
3801 return false;
3802
3803 // Check that the base is public.
3804 if (Base->getAccessSpecifier() != AS_public)
3805 return false;
3806
3807 // Check that the class is dynamic iff the base is.
// An empty base never forces a vtable, so its dynamic-ness need not match.
3808 auto *BaseDecl =
3809 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3810 if (!BaseDecl->isEmpty() &&
3811 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3812 return false;
3813
3814 return true;
3815}
3816
// Push the first field of the type_info object being built: the address
// point of the matching __cxxabiv1 runtime class's vtable, optionally
// signed with pointer authentication.
3817void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3818 // abi::__class_type_info.
3819 static const char * const ClassTypeInfo =
3820 "_ZTVN10__cxxabiv117__class_type_infoE";
3821 // abi::__si_class_type_info.
3822 static const char * const SIClassTypeInfo =
3823 "_ZTVN10__cxxabiv120__si_class_type_infoE";
3824 // abi::__vmi_class_type_info.
3825 static const char * const VMIClassTypeInfo =
3826 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3827
// Select the runtime vtable symbol by the kind of type being described.
3828 const char *VTableName = nullptr;
3829
3830 switch (Ty->getTypeClass()) {
3831#define TYPE(Class, Base)
3832#define ABSTRACT_TYPE(Class, Base)
3833#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3834#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3835#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3836#include "clang/AST/TypeNodes.inc"
3837 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3838
3839 case Type::LValueReference:
3840 case Type::RValueReference:
3841 llvm_unreachable("References shouldn't get here");
3842
3843 case Type::Auto:
3844 case Type::DeducedTemplateSpecialization:
3845 llvm_unreachable("Undeduced type shouldn't get here");
3846
3847 case Type::Pipe:
3848 llvm_unreachable("Pipe types shouldn't get here");
3849
3850 case Type::ArrayParameter:
3851 llvm_unreachable("Array Parameter types should not get here.");
3852
3853 case Type::Builtin:
3854 case Type::BitInt:
3855 // GCC treats vector and complex types as fundamental types.
3856 case Type::Vector:
3857 case Type::ExtVector:
3858 case Type::ConstantMatrix:
3859 case Type::Complex:
3860 case Type::Atomic:
3861 // FIXME: GCC treats block pointers as fundamental types?!
3862 case Type::BlockPointer:
3863 // abi::__fundamental_type_info.
3864 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3865 break;
3866
3867 case Type::ConstantArray:
3868 case Type::IncompleteArray:
3869 case Type::VariableArray:
3870 // abi::__array_type_info.
3871 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3872 break;
3873
3874 case Type::FunctionNoProto:
3875 case Type::FunctionProto:
3876 // abi::__function_type_info.
3877 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3878 break;
3879
3880 case Type::Enum:
3881 // abi::__enum_type_info.
3882 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3883 break;
3884
3885 case Type::Record: {
3886 const CXXRecordDecl *RD =
3887 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3888
// Classes pick __class/__si_class/__vmi_class based on their base layout.
3889 if (!RD->hasDefinition() || !RD->getNumBases()) {
3890 VTableName = ClassTypeInfo;
3891 } else if (CanUseSingleInheritance(RD)) {
3892 VTableName = SIClassTypeInfo;
3893 } else {
3894 VTableName = VMIClassTypeInfo;
3895 }
3896
3897 break;
3898 }
3899
3900 case Type::ObjCObject:
3901 // Ignore protocol qualifiers.
3902 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3903
3904 // Handle id and Class.
3905 if (isa<BuiltinType>(Ty)) {
3906 VTableName = ClassTypeInfo;
3907 break;
3908 }
3909
3910 assert(isa<ObjCInterfaceType>(Ty));
3911 [[fallthrough]];
3912
3913 case Type::ObjCInterface:
3914 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3915 VTableName = SIClassTypeInfo;
3916 } else {
3917 VTableName = ClassTypeInfo;
3918 }
3919 break;
3920
3921 case Type::ObjCObjectPointer:
3922 case Type::Pointer:
3923 // abi::__pointer_type_info.
3924 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3925 break;
3926
3927 case Type::MemberPointer:
3928 // abi::__pointer_to_member_type_info.
3929 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3930 break;
3931 }
3932
3933 llvm::Constant *VTable = nullptr;
3934
3935 // Check if the alias exists. If it doesn't, then get or create the global.
// NOTE(review): a guarding condition line (upstream 3936, presumably
// "if (CGM.getItaniumVTContext().isRelativeLayout())") is missing from
// this extract.
3937 VTable = CGM.getModule().getNamedAlias(VTableName);
3938 if (!VTable) {
3939 llvm::Type *Ty = llvm::ArrayType::get(CGM.GlobalsInt8PtrTy, 0);
3940 VTable = CGM.getModule().getOrInsertGlobal(VTableName, Ty);
3941 }
3942
3943 CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3944
// NOTE(review): PtrDiffTy's initializer (upstream 3946) is missing here.
3945 llvm::Type *PtrDiffTy =
3947
3948 // The vtable address point is 2.
// NOTE(review): the relative-layout condition line (upstream 3949) is
// missing; the branch below handles the relative vtable layout.
3950 // The vtable address point is 8 bytes after its start:
3951 // 4 for the offset to top + 4 for the relative offset to rtti.
3952 llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3953 VTable =
3954 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3955 } else {
3956 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3957 VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.GlobalsInt8PtrTy,
3958 VTable, Two);
3959 }
3960
// Sign the vtable pointer if pointer authentication is configured for
// type_info vtable pointers.
3961 if (auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXTypeInfoVTablePointer)
3962 VTable = CGM.getConstantSignedPointer(VTable, Schema, nullptr, GlobalDecl(),
3963 QualType(Ty, 0));
3964
3965 Fields.push_back(VTable);
3966}
3967
3968/// Return the linkage that the type info and type info name constants
3969/// should have for the given type.
3970static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3971 QualType Ty) {
3972 // Itanium C++ ABI 2.9.5p7:
3973 // In addition, it and all of the intermediate abi::__pointer_type_info
3974 // structs in the chain down to the abi::__class_type_info for the
3975 // incomplete class type must be prevented from resolving to the
3976 // corresponding type_info structs for the complete class type, possibly
3977 // by making them local static objects. Finally, a dummy class RTTI is
3978 // generated for the incomplete type that will not resolve to the final
3979 // complete class RTTI (because the latter need not exist), possibly by
3980 // making it a local static object.
// NOTE(review): the guarding condition (upstream 3981, presumably
// "if (ContainsIncompleteClassType(Ty))") is missing from this extract.
3982 return llvm::GlobalValue::InternalLinkage;
3983
3984 switch (Ty->getLinkage()) {
3985 case Linkage::Invalid:
3986 llvm_unreachable("Linkage hasn't been computed!");
3987
// NOTE(review): additional case labels are missing from this extract
// (upstream 3990 and 3993) in the two groups below.
3988 case Linkage::None:
3989 case Linkage::Internal:
3991 return llvm::GlobalValue::InternalLinkage;
3992
3994 case Linkage::Module:
3995 case Linkage::External:
3996 // RTTI is not enabled, which means that this type info struct is going
3997 // to be used for exception handling. Give it linkonce_odr linkage.
3998 if (!CGM.getLangOpts().RTTI)
3999 return llvm::GlobalValue::LinkOnceODRLinkage;
4000
4001 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
4002 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
4003 if (RD->hasAttr<WeakAttr>())
4004 return llvm::GlobalValue::WeakODRLinkage;
// NOTE(review): part of the dllimport condition (upstream 4007) is
// missing from this extract.
4005 if (CGM.getTriple().isWindowsItaniumEnvironment())
4006 if (RD->hasAttr<DLLImportAttr>() &&
4008 return llvm::GlobalValue::ExternalLinkage;
4009 // MinGW always uses LinkOnceODRLinkage for type info.
4010 if (RD->isDynamicClass() &&
4011 !CGM.getContext()
4012 .getTargetInfo()
4013 .getTriple()
4014 .isWindowsGNUEnvironment())
// Dynamic classes inherit the linkage of their vtable.
4015 return CGM.getVTableLinkage(RD);
4016 }
4017
4018 return llvm::GlobalValue::LinkOnceODRLinkage;
4019 }
4020
4021 llvm_unreachable("Invalid linkage!");
4022}
4023
// Build (or reuse) the RTTI descriptor for Ty, computing the appropriate
// linkage, visibility, and DLL storage class before delegating to the
// four-argument overload.
4024llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
4025 // We want to operate on the canonical type.
4026 Ty = Ty.getCanonicalType();
4027
4028 // Check if we've already emitted an RTTI descriptor for this type.
4029 SmallString<256> Name;
4030 llvm::raw_svector_ostream Out(Name);
4031 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
4032
4033 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
4034 if (OldGV && !OldGV->isDeclaration()) {
4035 assert(!OldGV->hasAvailableExternallyLinkage() &&
4036 "available_externally typeinfos not yet implemented");
4037
4038 return OldGV;
4039 }
4040
4041 // Check if there is already an external RTTI descriptor for this type.
// NOTE(review): the condition guarding this early return (upstream
// 4042-4043, presumably testing IsStandardLibraryRTTIDescriptor(Ty) ||
// ShouldUseExternalRTTIDescriptor(CGM, Ty)) is missing from this extract.
4044 return GetAddrOfExternalRTTIDescriptor(Ty);
4045
4046 // Emit the standard library with external linkage.
4047 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
4048
4049 // Give the type_info object and name the formal visibility of the
4050 // type itself.
4051 llvm::GlobalValue::VisibilityTypes llvmVisibility;
4052 if (llvm::GlobalValue::isLocalLinkage(Linkage))
4053 // If the linkage is local, only default visibility makes sense.
4054 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
4055 else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
4056 ItaniumCXXABI::RUK_NonUniqueHidden)
4057 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
4058 else
4059 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
4060
4061 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4062 llvm::GlobalValue::DefaultStorageClass;
4063 if (auto RD = Ty->getAsCXXRecordDecl()) {
// NOTE(review): one condition line (upstream 4066) is missing from this
// dllexport-classification test.
4064 if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
4065 RD->hasAttr<DLLExportAttr>()) ||
4067 !llvm::GlobalValue::isLocalLinkage(Linkage) &&
4068 llvmVisibility == llvm::GlobalValue::DefaultVisibility))
4069 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
4070 }
4071 return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
4072}
4073
// Emit the RTTI descriptor for Ty with the given linkage, visibility, and
// DLL storage class: vtable pointer + name pointer + kind-specific fields,
// packed into an anonymous constant struct global.
4074llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
4075 QualType Ty,
4076 llvm::GlobalVariable::LinkageTypes Linkage,
4077 llvm::GlobalValue::VisibilityTypes Visibility,
4078 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
4079 // Add the vtable pointer.
4080 BuildVTablePointer(cast<Type>(Ty));
4081
4082 // And the name.
4083 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
4084 llvm::Constant *TypeNameField;
4085
4086 // If we're supposed to demote the visibility, be sure to set a flag
4087 // to use a string comparison for type_info comparisons.
4088 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
4089 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
4090 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
4091 // The flag is the sign bit, which on ARM64 is defined to be clear
4092 // for global pointers. This is very ARM64-specific.
4093 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
4094 llvm::Constant *flag =
4095 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
4096 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
4097 TypeNameField =
4098 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.GlobalsInt8PtrTy);
4099 } else {
4100 TypeNameField = TypeName;
4101 }
4102 Fields.push_back(TypeNameField);
4103
// Append any kind-specific trailing fields mandated by the Itanium ABI.
4104 switch (Ty->getTypeClass()) {
4105#define TYPE(Class, Base)
4106#define ABSTRACT_TYPE(Class, Base)
4107#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
4108#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
4109#define DEPENDENT_TYPE(Class, Base) case Type::Class:
4110#include "clang/AST/TypeNodes.inc"
4111 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
4112
4113 // GCC treats vector types as fundamental types.
4114 case Type::Builtin:
4115 case Type::Vector:
4116 case Type::ExtVector:
4117 case Type::ConstantMatrix:
4118 case Type::Complex:
4119 case Type::BlockPointer:
4120 // Itanium C++ ABI 2.9.5p4:
4121 // abi::__fundamental_type_info adds no data members to std::type_info.
4122 break;
4123
4124 case Type::LValueReference:
4125 case Type::RValueReference:
4126 llvm_unreachable("References shouldn't get here");
4127
4128 case Type::Auto:
4129 case Type::DeducedTemplateSpecialization:
4130 llvm_unreachable("Undeduced type shouldn't get here");
4131
4132 case Type::Pipe:
4133 break;
4134
4135 case Type::BitInt:
4136 break;
4137
4138 case Type::ConstantArray:
4139 case Type::IncompleteArray:
4140 case Type::VariableArray:
4141 case Type::ArrayParameter:
4142 // Itanium C++ ABI 2.9.5p5:
4143 // abi::__array_type_info adds no data members to std::type_info.
4144 break;
4145
4146 case Type::FunctionNoProto:
4147 case Type::FunctionProto:
4148 // Itanium C++ ABI 2.9.5p5:
4149 // abi::__function_type_info adds no data members to std::type_info.
4150 break;
4151
4152 case Type::Enum:
4153 // Itanium C++ ABI 2.9.5p5:
4154 // abi::__enum_type_info adds no data members to std::type_info.
4155 break;
4156
4157 case Type::Record: {
4158 const CXXRecordDecl *RD =
4159 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
4160 if (!RD->hasDefinition() || !RD->getNumBases()) {
4161 // We don't need to emit any fields.
4162 break;
4163 }
4164
// NOTE(review): the condition choosing between the two builders (upstream
// 4165, presumably "if (CanUseSingleInheritance(RD))") is missing from
// this extract.
4166 BuildSIClassTypeInfo(RD);
4167 else
4168 BuildVMIClassTypeInfo(RD);
4169
4170 break;
4171 }
4172
4173 case Type::ObjCObject:
4174 case Type::ObjCInterface:
4175 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
4176 break;
4177
4178 case Type::ObjCObjectPointer:
4179 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
4180 break;
4181
4182 case Type::Pointer:
4183 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
4184 break;
4185
4186 case Type::MemberPointer:
4187 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
4188 break;
4189
4190 case Type::Atomic:
4191 // No fields, at least for the moment.
4192 break;
4193 }
4194
4195 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
4196
4197 SmallString<256> Name;
4198 llvm::raw_svector_ostream Out(Name);
4199 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
4200 llvm::Module &M = CGM.getModule();
4201 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
4202 llvm::GlobalVariable *GV =
4203 new llvm::GlobalVariable(M, Init->getType(),
4204 /*isConstant=*/true, Linkage, Init, Name);
4205
4206 // Export the typeinfo in the same circumstances as the vtable is exported.
4207 auto GVDLLStorageClass = DLLStorageClass;
4208 if (CGM.getTarget().hasPS4DLLImportExport() &&
4209 GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
4210 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
4211 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
4212 if (RD->hasAttr<DLLExportAttr>() ||
4213 CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
4214 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
4215 }
4216 }
4217
4218 // If there's already an old global variable, replace it with the new one.
4219 if (OldGV) {
4220 GV->takeName(OldGV);
4221 OldGV->replaceAllUsesWith(GV);
4222 OldGV->eraseFromParent();
4223 }
4224
4225 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
4226 GV->setComdat(M.getOrInsertComdat(GV->getName()));
4227
// NOTE(review): the computation of 'Align' (upstream 4228-4229) is
// missing from this extract.
4230 GV->setAlignment(Align.getAsAlign());
4231
4232 // The Itanium ABI specifies that type_info objects must be globally
4233 // unique, with one exception: if the type is an incomplete class
4234 // type or a (possibly indirect) pointer to one. That exception
4235 // affects the general case of comparing type_info objects produced
4236 // by the typeid operator, which is why the comparison operators on
4237 // std::type_info generally use the type_info name pointers instead
4238 // of the object addresses. However, the language's built-in uses
4239 // of RTTI generally require class types to be complete, even when
4240 // manipulating pointers to those class types. This allows the
4241 // implementation of dynamic_cast to rely on address equality tests,
4242 // which is much faster.
4243
4244 // All of this is to say that it's important that both the type_info
4245 // object and the type_info name be uniqued when weakly emitted.
4246
4247 TypeName->setVisibility(Visibility);
4248 CGM.setDSOLocal(TypeName);
4249
4250 GV->setVisibility(Visibility);
4251 CGM.setDSOLocal(GV);
4252
4253 TypeName->setDLLStorageClass(DLLStorageClass);
4254 GV->setDLLStorageClass(GVDLLStorageClass);
4255
4256 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
4257 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
4258
4259 return GV;
4260}
4261
4262/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
4263/// for the given Objective-C object type.
4264void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
4265 // Drop qualifiers.
4266 const Type *T = OT->getBaseType().getTypePtr();
4267 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
4268
4269 // The builtin types are abi::__class_type_infos and don't require
4270 // extra fields.
4271 if (isa<BuiltinType>(T)) return;
4272
4273 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
4274 ObjCInterfaceDecl *Super = Class->getSuperClass();
4275
4276 // Root classes are also __class_type_info.
4277 if (!Super) return;
4278
4279 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
4280
4281 // Everything else is single inheritance.
4282 llvm::Constant *BaseTypeInfo =
4283 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
4284 Fields.push_back(BaseTypeInfo);
4285}
4286
4287/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
4288/// inheritance, according to the Itanium C++ ABI, 2.95p6b.
4289void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
4290 // Itanium C++ ABI 2.9.5p6b:
4291 // It adds to abi::__class_type_info a single member pointing to the
4292 // type_info structure for the base type,
4293 llvm::Constant *BaseTypeInfo =
4294 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
4295 Fields.push_back(BaseTypeInfo);
4296}
4297
4298namespace {
4299 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
4300 /// a class hierarchy.
4301 struct SeenBases {
4304 };
4305}
4306
4307/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
4308/// abi::__vmi_class_type_info.
4309///
4311 SeenBases &Bases) {
4312
4313 unsigned Flags = 0;
4314
4315 auto *BaseDecl =
4316 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
4317
4318 if (Base->isVirtual()) {
4319 // Mark the virtual base as seen.
4320 if (!Bases.VirtualBases.insert(BaseDecl).second) {
4321 // If this virtual base has been seen before, then the class is diamond
4322 // shaped.
4323 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
4324 } else {
4325 if (Bases.NonVirtualBases.count(BaseDecl))
4326 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4327 }
4328 } else {
4329 // Mark the non-virtual base as seen.
4330 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
4331 // If this non-virtual base has been seen before, then the class has non-
4332 // diamond shaped repeated inheritance.
4333 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4334 } else {
4335 if (Bases.VirtualBases.count(BaseDecl))
4336 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4337 }
4338 }
4339
4340 // Walk all bases.
4341 for (const auto &I : BaseDecl->bases())
4342 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4343
4344 return Flags;
4345}
4346
4348 unsigned Flags = 0;
4349 SeenBases Bases;
4350
4351 // Walk all bases.
4352 for (const auto &I : RD->bases())
4353 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4354
4355 return Flags;
4356}
4357
4358/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4359/// classes with bases that do not satisfy the abi::__si_class_type_info
4360/// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
4361void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4362 llvm::Type *UnsignedIntLTy =
4364
4365 // Itanium C++ ABI 2.9.5p6c:
4366 // __flags is a word with flags describing details about the class
4367 // structure, which may be referenced by using the __flags_masks
4368 // enumeration. These flags refer to both direct and indirect bases.
4369 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4370 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4371
4372 // Itanium C++ ABI 2.9.5p6c:
4373 // __base_count is a word with the number of direct proper base class
4374 // descriptions that follow.
4375 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4376
4377 if (!RD->getNumBases())
4378 return;
4379
4380 // Now add the base class descriptions.
4381
4382 // Itanium C++ ABI 2.9.5p6c:
4383 // __base_info[] is an array of base class descriptions -- one for every
4384 // direct proper base. Each description is of the type:
4385 //
4386 // struct abi::__base_class_type_info {
4387 // public:
4388 // const __class_type_info *__base_type;
4389 // long __offset_flags;
4390 //
4391 // enum __offset_flags_masks {
4392 // __virtual_mask = 0x1,
4393 // __public_mask = 0x2,
4394 // __offset_shift = 8
4395 // };
4396 // };
4397
4398 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4399 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4400 // LLP64 platforms.
4401 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4402 // LLP64 platforms.
4403 QualType OffsetFlagsTy = CGM.getContext().LongTy;
4404 const TargetInfo &TI = CGM.getContext().getTargetInfo();
4405 if (TI.getTriple().isOSCygMing() &&
4407 OffsetFlagsTy = CGM.getContext().LongLongTy;
4408 llvm::Type *OffsetFlagsLTy =
4409 CGM.getTypes().ConvertType(OffsetFlagsTy);
4410
4411 for (const auto &Base : RD->bases()) {
4412 // The __base_type member points to the RTTI for the base type.
4413 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4414
4415 auto *BaseDecl =
4416 cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4417
4418 int64_t OffsetFlags = 0;
4419
4420 // All but the lower 8 bits of __offset_flags are a signed offset.
4421 // For a non-virtual base, this is the offset in the object of the base
4422 // subobject. For a virtual base, this is the offset in the virtual table of
4423 // the virtual base offset for the virtual base referenced (negative).
4424 CharUnits Offset;
4425 if (Base.isVirtual())
4426 Offset =
4428 else {
4429 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4430 Offset = Layout.getBaseClassOffset(BaseDecl);
4431 };
4432
4433 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4434
4435 // The low-order byte of __offset_flags contains flags, as given by the
4436 // masks from the enumeration __offset_flags_masks.
4437 if (Base.isVirtual())
4438 OffsetFlags |= BCTI_Virtual;
4439 if (Base.getAccessSpecifier() == AS_public)
4440 OffsetFlags |= BCTI_Public;
4441
4442 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4443 }
4444}
4445
4446/// Compute the flags for a __pbase_type_info, and remove the corresponding
4447/// pieces from \p Type.
4449 unsigned Flags = 0;
4450
4451 if (Type.isConstQualified())
4452 Flags |= ItaniumRTTIBuilder::PTI_Const;
4453 if (Type.isVolatileQualified())
4454 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4455 if (Type.isRestrictQualified())
4456 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4457 Type = Type.getUnqualifiedType();
4458
4459 // Itanium C++ ABI 2.9.5p7:
4460 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4461 // incomplete class type, the incomplete target type flag is set.
4463 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4464
4465 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4466 if (Proto->isNothrow()) {
4467 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4469 }
4470 }
4471
4472 return Flags;
4473}
4474
4475/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4476/// used for pointer types.
4477void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4478 // Itanium C++ ABI 2.9.5p7:
4479 // __flags is a flag word describing the cv-qualification and other
4480 // attributes of the type pointed to
4481 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4482
4483 llvm::Type *UnsignedIntLTy =
4485 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4486
4487 // Itanium C++ ABI 2.9.5p7:
4488 // __pointee is a pointer to the std::type_info derivation for the
4489 // unqualified type being pointed to.
4490 llvm::Constant *PointeeTypeInfo =
4491 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4492 Fields.push_back(PointeeTypeInfo);
4493}
4494
4495/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4496/// struct, used for member pointer types.
4497void
4498ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4499 QualType PointeeTy = Ty->getPointeeType();
4500
4501 // Itanium C++ ABI 2.9.5p7:
4502 // __flags is a flag word describing the cv-qualification and other
4503 // attributes of the type pointed to.
4504 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4505
4506 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4507 if (IsIncompleteClassType(ClassType))
4508 Flags |= PTI_ContainingClassIncomplete;
4509
4510 llvm::Type *UnsignedIntLTy =
4512 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4513
4514 // Itanium C++ ABI 2.9.5p7:
4515 // __pointee is a pointer to the std::type_info derivation for the
4516 // unqualified type being pointed to.
4517 llvm::Constant *PointeeTypeInfo =
4518 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4519 Fields.push_back(PointeeTypeInfo);
4520
4521 // Itanium C++ ABI 2.9.5p9:
4522 // __context is a pointer to an abi::__class_type_info corresponding to the
4523 // class type containing the member pointed to
4524 // (e.g., the "A" in "int A::*").
4525 Fields.push_back(
4526 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4527}
4528
4529llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4530 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4531}
4532
4533void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4534 // Types added here must also be added to TypeInfoIsInStandardLibrary.
4535 QualType FundamentalTypes[] = {
4536 getContext().VoidTy, getContext().NullPtrTy,
4537 getContext().BoolTy, getContext().WCharTy,
4538 getContext().CharTy, getContext().UnsignedCharTy,
4539 getContext().SignedCharTy, getContext().ShortTy,
4540 getContext().UnsignedShortTy, getContext().IntTy,
4541 getContext().UnsignedIntTy, getContext().LongTy,
4542 getContext().UnsignedLongTy, getContext().LongLongTy,
4543 getContext().UnsignedLongLongTy, getContext().Int128Ty,
4544 getContext().UnsignedInt128Ty, getContext().HalfTy,
4545 getContext().FloatTy, getContext().DoubleTy,
4546 getContext().LongDoubleTy, getContext().Float128Ty,
4547 getContext().Char8Ty, getContext().Char16Ty,
4548 getContext().Char32Ty
4549 };
4550 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4551 RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
4552 ? llvm::GlobalValue::DLLExportStorageClass
4553 : llvm::GlobalValue::DefaultStorageClass;
4554 llvm::GlobalValue::VisibilityTypes Visibility =
4556 for (const QualType &FundamentalType : FundamentalTypes) {
4557 QualType PointerType = getContext().getPointerType(FundamentalType);
4558 QualType PointerTypeConst = getContext().getPointerType(
4559 FundamentalType.withConst());
4560 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4561 ItaniumRTTIBuilder(*this).BuildTypeInfo(
4562 Type, llvm::GlobalValue::ExternalLinkage,
4563 Visibility, DLLStorageClass);
4564 }
4565}
4566
4567/// What sort of uniqueness rules should we use for the RTTI for the
4568/// given type?
4569ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4570 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4571 if (shouldRTTIBeUnique())
4572 return RUK_Unique;
4573
4574 // It's only necessary for linkonce_odr or weak_odr linkage.
4575 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4576 Linkage != llvm::GlobalValue::WeakODRLinkage)
4577 return RUK_Unique;
4578
4579 // It's only necessary with default visibility.
4580 if (CanTy->getVisibility() != DefaultVisibility)
4581 return RUK_Unique;
4582
4583 // If we're not required to publish this symbol, hide it.
4584 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4585 return RUK_NonUniqueHidden;
4586
4587 // If we're required to publish this symbol, as we might be under an
4588 // explicit instantiation, leave it with default visibility but
4589 // enable string-comparisons.
4590 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4591 return RUK_NonUniqueVisible;
4592}
4593
// Find out how to codegen the complete destructor and constructor
namespace {
// Strategy for emitting a complete structor relative to its base variant.
enum class StructorCodegen {
  Emit,   // emit a separate definition
  RAUW,   // replace uses of the complete symbol with the base definition
  Alias,  // emit the complete symbol as an alias of the base definition
  COMDAT  // emit both variants into a shared COMDAT group
};
} // namespace
4598static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4599 const CXXMethodDecl *MD) {
4600 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4601 return StructorCodegen::Emit;
4602
4603 // The complete and base structors are not equivalent if there are any virtual
4604 // bases, so emit separate functions.
4605 if (MD->getParent()->getNumVBases())
4606 return StructorCodegen::Emit;
4607
4609 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4611 } else {
4612 const auto *CD = cast<CXXConstructorDecl>(MD);
4614 }
4615 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4616
4617 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4618 return StructorCodegen::RAUW;
4619
4620 // FIXME: Should we allow available_externally aliases?
4621 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4622 return StructorCodegen::RAUW;
4623
4624 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4625 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4626 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4627 CGM.getTarget().getTriple().isOSBinFormatWasm())
4628 return StructorCodegen::COMDAT;
4629 return StructorCodegen::Emit;
4630 }
4631
4632 return StructorCodegen::Alias;
4633}
4634
4637 GlobalDecl TargetDecl) {
4638 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4639
4640 StringRef MangledName = CGM.getMangledName(AliasDecl);
4641 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
4642 if (Entry && !Entry->isDeclaration())
4643 return;
4644
4645 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4646
4647 // Create the alias with no name.
4648 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4649
4650 // Constructors and destructors are always unnamed_addr.
4651 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4652
4653 // Switch any previous uses to the alias.
4654 if (Entry) {
4655 assert(Entry->getType() == Aliasee->getType() &&
4656 "declaration exists with different type");
4657 Alias->takeName(Entry);
4658 Entry->replaceAllUsesWith(Alias);
4659 Entry->eraseFromParent();
4660 } else {
4661 Alias->setName(MangledName);
4662 }
4663
4664 // Finally, set up the alias with its proper name and attributes.
4665 CGM.SetCommonAttributes(AliasDecl, Alias);
4666}
4667
4668void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4669 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4670 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4671 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4672
4673 StructorCodegen CGType = getCodegenToUse(CGM, MD);
4674
4675 if (CD ? GD.getCtorType() == Ctor_Complete
4676 : GD.getDtorType() == Dtor_Complete) {
4677 GlobalDecl BaseDecl;
4678 if (CD)
4679 BaseDecl = GD.getWithCtorType(Ctor_Base);
4680 else
4681 BaseDecl = GD.getWithDtorType(Dtor_Base);
4682
4683 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4684 emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4685 return;
4686 }
4687
4688 if (CGType == StructorCodegen::RAUW) {
4689 StringRef MangledName = CGM.getMangledName(GD);
4690 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4691 CGM.addReplacement(MangledName, Aliasee);
4692 return;
4693 }
4694 }
4695
4696 // The base destructor is equivalent to the base destructor of its
4697 // base class if there is exactly one non-virtual base class with a
4698 // non-trivial destructor, there are no fields with a non-trivial
4699 // destructor, and the body of the destructor is trivial.
4700 if (DD && GD.getDtorType() == Dtor_Base &&
4701 CGType != StructorCodegen::COMDAT &&
4703 return;
4704
4705 // FIXME: The deleting destructor is equivalent to the selected operator
4706 // delete if:
4707 // * either the delete is a destroying operator delete or the destructor
4708 // would be trivial if it weren't virtual,
4709 // * the conversion from the 'this' parameter to the first parameter of the
4710 // destructor is equivalent to a bitcast,
4711 // * the destructor does not have an implicit "this" return, and
4712 // * the operator delete has the same calling convention and IR function type
4713 // as the destructor.
4714 // In such cases we should try to emit the deleting dtor as an alias to the
4715 // selected 'operator delete'.
4716
4717 llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4718
4719 if (CGType == StructorCodegen::COMDAT) {
4720 SmallString<256> Buffer;
4721 llvm::raw_svector_ostream Out(Buffer);
4722 if (DD)
4723 getMangleContext().mangleCXXDtorComdat(DD, Out);
4724 else
4725 getMangleContext().mangleCXXCtorComdat(CD, Out);
4726 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4727 Fn->setComdat(C);
4728 } else {
4729 CGM.maybeSetTrivialComdat(*MD, *Fn);
4730 }
4731}
4732
4733static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4734 // void *__cxa_begin_catch(void*);
4735 llvm::FunctionType *FTy = llvm::FunctionType::get(
4736 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4737
4738 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4739}
4740
4741static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4742 // void __cxa_end_catch();
4743 llvm::FunctionType *FTy =
4744 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4745
4746 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4747}
4748
4749static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4750 // void *__cxa_get_exception_ptr(void*);
4751 llvm::FunctionType *FTy = llvm::FunctionType::get(
4752 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4753
4754 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4755}
4756
4757namespace {
4758 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4759 /// exception type lets us state definitively that the thrown exception
4760 /// type does not have a destructor. In particular:
4761 /// - Catch-alls tell us nothing, so we have to conservatively
4762 /// assume that the thrown exception might have a destructor.
4763 /// - Catches by reference behave according to their base types.
4764 /// - Catches of non-record types will only trigger for exceptions
4765 /// of non-record types, which never have destructors.
4766 /// - Catches of record types can trigger for arbitrary subclasses
4767 /// of the caught type, so we have to assume the actual thrown
4768 /// exception type might have a throwing destructor, even if the
4769 /// caught type's destructor is trivial or nothrow.
4770 struct CallEndCatch final : EHScopeStack::Cleanup {
4771 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4772 bool MightThrow;
4773
4774 void Emit(CodeGenFunction &CGF, Flags flags) override {
4775 if (!MightThrow) {
4777 return;
4778 }
4779
4781 }
4782 };
4783}
4784
4785/// Emits a call to __cxa_begin_catch and enters a cleanup to call
4786/// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume
4787/// that the exception object's dtor is nothrow, therefore the __cxa_end_catch
4788/// call can be marked as nounwind even if EndMightThrow is true.
4789///
4790/// \param EndMightThrow - true if __cxa_end_catch might throw
4791static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4792 llvm::Value *Exn,
4793 bool EndMightThrow) {
4794 llvm::CallInst *call =
4796
4797 CGF.EHStack.pushCleanup<CallEndCatch>(
4799 EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor);
4800
4801 return call;
4802}
4803
4804/// A "special initializer" callback for initializing a catch
4805/// parameter during catch initialization.
4807 const VarDecl &CatchParam,
4808 Address ParamAddr,
4810 // Load the exception from where the landing pad saved it.
4811 llvm::Value *Exn = CGF.getExceptionFromSlot();
4812
4813 CanQualType CatchType =
4814 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4815 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4816
4817 // If we're catching by reference, we can just cast the object
4818 // pointer to the appropriate pointer.
4819 if (isa<ReferenceType>(CatchType)) {
4820 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4821 bool EndCatchMightThrow = CaughtType->isRecordType();
4822
4823 // __cxa_begin_catch returns the adjusted object pointer.
4824 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4825
4826 // We have no way to tell the personality function that we're
4827 // catching by reference, so if we're catching a pointer,
4828 // __cxa_begin_catch will actually return that pointer by value.
4829 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4830 QualType PointeeType = PT->getPointeeType();
4831
4832 // When catching by reference, generally we should just ignore
4833 // this by-value pointer and use the exception object instead.
4834 if (!PointeeType->isRecordType()) {
4835
4836 // Exn points to the struct _Unwind_Exception header, which
4837 // we have to skip past in order to reach the exception data.
4838 unsigned HeaderSize =
4840 AdjustedExn =
4841 CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
4842
4843 // However, if we're catching a pointer-to-record type that won't
4844 // work, because the personality function might have adjusted
4845 // the pointer. There's actually no way for us to fully satisfy
4846 // the language/ABI contract here: we can't use Exn because it
4847 // might have the wrong adjustment, but we can't use the by-value
4848 // pointer because it's off by a level of abstraction.
4849 //
4850 // The current solution is to dump the adjusted pointer into an
4851 // alloca, which breaks language semantics (because changing the
4852 // pointer doesn't change the exception) but at least works.
4853 // The better solution would be to filter out non-exact matches
4854 // and rethrow them, but this is tricky because the rethrow
4855 // really needs to be catchable by other sites at this landing
4856 // pad. The best solution is to fix the personality function.
4857 } else {
4858 // Pull the pointer for the reference type off.
4859 llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);
4860
4861 // Create the temporary and write the adjusted pointer into it.
4862 Address ExnPtrTmp =
4863 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4864 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4865 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4866
4867 // Bind the reference to the temporary.
4868 AdjustedExn = ExnPtrTmp.emitRawPointer(CGF);
4869 }
4870 }
4871
4872 llvm::Value *ExnCast =
4873 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4874 CGF.Builder.CreateStore(ExnCast, ParamAddr);
4875 return;
4876 }
4877
4878 // Scalars and complexes.
4879 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4880 if (TEK != TEK_Aggregate) {
4881 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4882
4883 // If the catch type is a pointer type, __cxa_begin_catch returns
4884 // the pointer by value.
4885 if (CatchType->hasPointerRepresentation()) {
4886 llvm::Value *CastExn =
4887 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4888
4889 switch (CatchType.getQualifiers().getObjCLifetime()) {
4891 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4892 [[fallthrough]];
4893
4897 CGF.Builder.CreateStore(CastExn, ParamAddr);
4898 return;
4899
4901 CGF.EmitARCInitWeak(ParamAddr, CastExn);
4902 return;
4903 }
4904 llvm_unreachable("bad ownership qualifier!");
4905 }
4906
4907 // Otherwise, it returns a pointer into the exception object.
4908
4909 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(AdjustedExn, CatchType);
4910 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4911 switch (TEK) {
4912 case TEK_Complex:
4913 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4914 /*init*/ true);
4915 return;
4916 case TEK_Scalar: {
4917 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4918 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4919 return;
4920 }
4921 case TEK_Aggregate:
4922 llvm_unreachable("evaluation kind filtered out!");
4923 }
4924 llvm_unreachable("bad evaluation kind");
4925 }
4926
4927 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4928 auto catchRD = CatchType->getAsCXXRecordDecl();
4929 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4930
4931 llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
4932
4933 // Check for a copy expression. If we don't have a copy expression,
4934 // that means a trivial copy is okay.
4935 const Expr *copyExpr = CatchParam.getInit();
4936 if (!copyExpr) {
4937 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4938 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4939 LLVMCatchTy, caughtExnAlignment);
4940 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4941 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4942 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4943 return;
4944 }
4945
4946 // We have to call __cxa_get_exception_ptr to get the adjusted
4947 // pointer before copying.
4948 llvm::CallInst *rawAdjustedExn =
4950
4951 // Cast that to the appropriate type.
4952 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4953 LLVMCatchTy, caughtExnAlignment);
4954
4955 // The copy expression is defined in terms of an OpaqueValueExpr.
4956 // Find it and map it to the adjusted expression.
4957 CodeGenFunction::OpaqueValueMapping
4958 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4959 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4960
4961 // Call the copy ctor in a terminate scope.
4962 CGF.EHStack.pushTerminate();
4963
4964 // Perform the copy construction.
4965 CGF.EmitAggExpr(copyExpr,
4966 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4971
4972 // Leave the terminate scope.
4973 CGF.EHStack.popTerminate();
4974
4975 // Undo the opaque value mapping.
4976 opaque.pop();
4977
4978 // Finally we can call __cxa_begin_catch.
4979 CallBeginCatch(CGF, Exn, true);
4980}
4981
4982/// Begins a catch statement by initializing the catch variable and
4983/// calling __cxa_begin_catch.
4984void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4985 const CXXCatchStmt *S) {
4986 // We have to be very careful with the ordering of cleanups here:
4987 // C++ [except.throw]p4:
4988 // The destruction [of the exception temporary] occurs
4989 // immediately after the destruction of the object declared in
4990 // the exception-declaration in the handler.
4991 //
4992 // So the precise ordering is:
4993 // 1. Construct catch variable.
4994 // 2. __cxa_begin_catch
4995 // 3. Enter __cxa_end_catch cleanup
4996 // 4. Enter dtor cleanup
4997 //
4998 // We do this by using a slightly abnormal initialization process.
4999 // Delegation sequence:
5000 // - ExitCXXTryStmt opens a RunCleanupsScope
5001 // - EmitAutoVarAlloca creates the variable and debug info
5002 // - InitCatchParam initializes the variable from the exception
5003 // - CallBeginCatch calls __cxa_begin_catch
5004 // - CallBeginCatch enters the __cxa_end_catch cleanup
5005 // - EmitAutoVarCleanups enters the variable destructor cleanup
5006 // - EmitCXXTryStmt emits the code for the catch body
5007 // - EmitCXXTryStmt close the RunCleanupsScope
5008
5009 VarDecl *CatchParam = S->getExceptionDecl();
5010 if (!CatchParam) {
5011 llvm::Value *Exn = CGF.getExceptionFromSlot();
5012 CallBeginCatch(CGF, Exn, true);
5013 return;
5014 }
5015
5016 // Emit the local.
5017 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
5018 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
5019 CGF.EmitAutoVarCleanups(var);
5020}
5021
5022/// Get or define the following function:
5023/// void @__clang_call_terminate(i8* %exn) nounwind noreturn
5024/// This code is used only in C++.
5025static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
5026 ASTContext &C = CGM.getContext();
5028 C.VoidTy, {C.getPointerType(C.CharTy)});
5029 llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI);
5030 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
5031 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
5032 llvm::Function *fn =
5033 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
5034 if (fn->empty()) {
5035 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false);
5037 fn->setDoesNotThrow();
5038 fn->setDoesNotReturn();
5039
5040 // What we really want is to massively penalize inlining without
5041 // forbidding it completely. The difference between that and
5042 // 'noinline' is negligible.
5043 fn->addFnAttr(llvm::Attribute::NoInline);
5044
5045 // Allow this function to be shared across translation units, but
5046 // we don't want it to turn into an exported symbol.
5047 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
5048 fn->setVisibility(llvm::Function::HiddenVisibility);
5049 if (CGM.supportsCOMDAT())
5050 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
5051
5052 // Set up the function.
5053 llvm::BasicBlock *entry =
5054 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
5055 CGBuilderTy builder(CGM, entry);
5056
5057 // Pull the exception pointer out of the parameter list.
5058 llvm::Value *exn = &*fn->arg_begin();
5059
5060 // Call __cxa_begin_catch(exn).
5061 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
5062 catchCall->setDoesNotThrow();
5063 catchCall->setCallingConv(CGM.getRuntimeCC());
5064
5065 // Call std::terminate().
5066 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
5067 termCall->setDoesNotThrow();
5068 termCall->setDoesNotReturn();
5069 termCall->setCallingConv(CGM.getRuntimeCC());
5070
5071 // std::terminate cannot return.
5072 builder.CreateUnreachable();
5073 }
5074 return fnRef;
5075}
5076
5077llvm::CallInst *
5078ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
5079 llvm::Value *Exn) {
5080 // In C++, we want to call __cxa_begin_catch() before terminating.
5081 if (Exn) {
5082 assert(CGF.CGM.getLangOpts().CPlusPlus);
5084 }
5085 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
5086}
5087
5088std::pair<llvm::Value *, const CXXRecordDecl *>
5089ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
5090 const CXXRecordDecl *RD) {
5091 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
5092}
5093
5094llvm::Constant *
5095ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) {
5096 const CXXMethodDecl *origMD =
5097 cast<CXXMethodDecl>(CGM.getItaniumVTableContext()
5099 .getDecl());
5100 llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(origMD);
5101 QualType funcType = CGM.getContext().getMemberPointerType(
5102 MD->getType(), MD->getParent()->getTypeForDecl());
5103 return CGM.getMemberFunctionPointer(thunk, funcType);
5104}
5105
// Begin a catch handler under the WebAssembly EH model. When the target has
// the "exception-handling" feature, the handler runs inside a catchpad
// funclet; push a cleanup that emits the matching catchret when the catch
// scope unwinds, then defer to the normal Itanium begin-catch emission.
void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                       const CXXCatchStmt *C) {
  if (CGF.getTarget().hasFeature("exception-handling"))
    CGF.EHStack.pushCleanup<CatchRetScope>(
        NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
  // Shared Itanium logic: __cxa_begin_catch, catch-parameter init, etc.
  ItaniumCXXABI::emitBeginCatch(CGF, C);
}
5113
5114llvm::CallInst *
5115WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
5116 llvm::Value *Exn) {
5117 // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
5118 // the violating exception to mark it handled, but it is currently hard to do
5119 // with wasm EH instruction structure with catch/catch_all, we just call
5120 // std::terminate and ignore the violating exception as in CGCXXABI.
5121 // TODO Consider code transformation that makes calling __clang_call_terminate
5122 // possible.
5124}
5125
5126/// Register a global destructor as best as we know how.
5127void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
5128 llvm::FunctionCallee Dtor,
5129 llvm::Constant *Addr) {
5130 if (D.getTLSKind() != VarDecl::TLS_None) {
5131 llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
5132
5133 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
5134 llvm::FunctionType *AtExitTy =
5135 llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, PtrTy}, true);
5136
5137 // Fetch the actual function.
5138 llvm::FunctionCallee AtExit =
5139 CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
5140
5141 // Create __dtor function for the var decl.
5142 llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
5143
5144 // Register above __dtor with atexit().
5145 // First param is flags and must be 0, second param is function ptr
5146 llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
5147 CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
5148
5149 // Cannot unregister TLS __dtor so done
5150 return;
5151 }
5152
5153 // Create __dtor function for the var decl.
5154 llvm::Function *DtorStub =
5155 cast<llvm::Function>(CGF.createAtExitStub(D, Dtor, Addr));
5156
5157 // Register above __dtor with atexit().
5158 CGF.registerGlobalDtorWithAtExit(DtorStub);
5159
5160 // Emit __finalize function to unregister __dtor and (as appropriate) call
5161 // __dtor.
5162 emitCXXStermFinalizer(D, DtorStub, Addr);
5163}
5164
5165void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
5166 llvm::Constant *addr) {
5167 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
5168 SmallString<256> FnName;
5169 {
5170 llvm::raw_svector_ostream Out(FnName);
5171 getMangleContext().mangleDynamicStermFinalizer(&D, Out);
5172 }
5173
5174 // Create the finalization action associated with a variable.
5176 llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
5177 FTy, FnName.str(), FI, D.getLocation());
5178
5179 CodeGenFunction CGF(CGM);
5180
5181 CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
5183 D.getInit()->getExprLoc());
5184
5185 // The unatexit subroutine unregisters __dtor functions that were previously
5186 // registered by the atexit subroutine. If the referenced function is found,
5187 // the unatexit returns a value of 0, meaning that the cleanup is still
5188 // pending (and we should call the __dtor function).
5189 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
5190
5191 llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
5192
5193 llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
5194 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
5195
5196 // Check if unatexit returns a value of 0. If it does, jump to
5197 // DestructCallBlock, otherwise jump to EndBlock directly.
5198 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
5199
5200 CGF.EmitBlock(DestructCallBlock);
5201
5202 // Emit the call to dtorStub.
5203 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
5204
5205 // Make sure the call and the callee agree on calling convention.
5206 CI->setCallingConv(dtorStub->getCallingConv());
5207
5208 CGF.EmitBlock(EndBlock);
5209
5210 CGF.FinishFunction();
5211
5212 if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
5213 CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
5214 IPA->getPriority());
5215 } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
5216 getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
5217 // According to C++ [basic.start.init]p2, class template static data
5218 // members (i.e., implicitly or explicitly instantiated specializations)
5219 // have unordered initialization. As a consequence, we can put them into
5220 // their own llvm.global_dtors entry.
5221 CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
5222 } else {
5223 CGM.AddCXXStermFinalizerEntry(StermFinalizer);
5224 }
5225}
#define V(N, I)
Definition: ASTContext.h:3341
const Decl * D
IndirectLocalPath & Path
Expr * E
static StructorCodegen getCodegenToUse(CodeGenModule &CGM, const CXXMethodDecl *MD)
static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF)
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM)
Get or define the following function: void @__clang_call_terminate(i8* exn) nounwind noreturn This co...
static bool CXXRecordNonInlineHasAttr(const CXXRecordDecl *RD)
static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type)
Compute the flags for a __pbase_type_info, and remove the corresponding pieces from Type.
static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty)
ShouldUseExternalRTTIDescriptor - Returns whether the type information for the given type exists some...
static bool IsIncompleteClassType(const RecordType *RecordTy)
IsIncompleteClassType - Returns whether the given record type is incomplete.
static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base, SeenBases &Bases)
ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in abi::__vmi_class_type_info.
static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF)
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF, llvm::FunctionCallee dtor, llvm::Constant *addr, bool TLS)
Register a global destructor using __cxa_atexit.
static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF)
static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM)
static llvm::Constant * pointerAuthResignMemberFunctionPointer(llvm::Constant *Src, QualType DestType, QualType SrcType, CodeGenModule &CGM)
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty)
Return the linkage that the type info and type info name constants should have for the given type.
static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static llvm::Value * performTypeAdjustment(CodeGenFunction &CGF, Address InitialPtr, const CXXRecordDecl *UnadjustedClass, int64_t NonVirtualAdjustment, int64_t VirtualAdjustment, bool IsReturnAdjustment)
static llvm::Function * createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM, StringRef FnName)
static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM)
static bool IsStandardLibraryRTTIDescriptor(QualType Ty)
IsStandardLibraryRTTIDescriptor - Returns whether the type information for the given type exists in t...
static llvm::Value * CallBeginCatch(CodeGenFunction &CGF, llvm::Value *Exn, bool EndMightThrow)
Emits a call to __cxa_begin_catch and enters a cleanup to call __cxa_end_catch.
static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static CharUnits computeOffsetHint(ASTContext &Context, const CXXRecordDecl *Src, const CXXRecordDecl *Dst)
Compute the src2dst_offset hint as described in the Itanium C++ ABI [2.9.7].
static bool isThreadWrapperReplaceable(const VarDecl *VD, CodeGen::CodeGenModule &CGM)
static void InitCatchParam(CodeGenFunction &CGF, const VarDecl &CatchParam, Address ParamAddr, SourceLocation Loc)
A "special initializer" callback for initializing a catch parameter during catch initialization.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty)
TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type info for that type is de...
static bool CanUseSingleInheritance(const CXXRecordDecl *RD)
static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM)
static llvm::GlobalValue::LinkageTypes getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM)
Get the appropriate linkage for the wrapper function.
static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM)
static void setVTableSelectiveDLLImportExport(CodeGenModule &CGM, llvm::GlobalVariable *VTable, const CXXRecordDecl *RD)
static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM, llvm::PointerType *GuardPtrTy)
static bool ContainsIncompleteClassType(QualType Ty)
ContainsIncompleteClassType - Returns whether the given type contains an incomplete class type.
static llvm::Constant * pointerAuthResignConstant(llvm::Value *Ptr, const CGPointerAuthInfo &CurAuthInfo, const CGPointerAuthInfo &NewAuthInfo, CodeGenModule &CGM)
static void emitConstructorDestructorAlias(CodeGenModule &CGM, GlobalDecl AliasDecl, GlobalDecl TargetDecl)
static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM)
static void dtorTy(Block *, std::byte *Ptr, const Descriptor *)
Definition: Descriptor.cpp:29
int Priority
Definition: Format.cpp:3005
llvm::MachO::Record Record
Definition: MachO.h:31
static uint64_t getFieldOffset(const ASTContext &C, const FieldDecl *FD)
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
SourceLocation Loc
Definition: SemaObjC.cpp:758