clang 20.0.0git
CGExpr.cpp
Go to the documentation of this file.
1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGObjCRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CGRecordLayout.h"
22#include "CodeGenFunction.h"
23#include "CodeGenModule.h"
24#include "ConstantEmitter.h"
25#include "TargetInfo.h"
27#include "clang/AST/Attr.h"
28#include "clang/AST/DeclObjC.h"
29#include "clang/AST/NSAPI.h"
34#include "llvm/ADT/Hashing.h"
35#include "llvm/ADT/STLExtras.h"
36#include "llvm/ADT/StringExtras.h"
37#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/Intrinsics.h"
39#include "llvm/IR/IntrinsicsWebAssembly.h"
40#include "llvm/IR/LLVMContext.h"
41#include "llvm/IR/MDBuilder.h"
42#include "llvm/IR/MatrixBuilder.h"
43#include "llvm/Passes/OptimizationLevel.h"
44#include "llvm/Support/ConvertUTF.h"
45#include "llvm/Support/Endian.h"
46#include "llvm/Support/MathExtras.h"
47#include "llvm/Support/Path.h"
48#include "llvm/Support/SaveAndRestore.h"
49#include "llvm/Support/xxhash.h"
50#include "llvm/Transforms/Utils/SanitizerStats.h"
51
52#include <optional>
53#include <string>
54
55using namespace clang;
56using namespace CodeGen;
57
// Experiment to make sanitizers easier to debug.
// When set, each UBSAN check gets its own trap instruction instead of traps
// being merged, so a crash maps back to exactly one check site.
static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
    "ubsan-unique-traps", llvm::cl::Optional,
    llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."));
62
// TODO: Introduce frontend options to enabled per sanitizers, similar to
// `fsanitize-trap`.
// When set, UBSAN checks are wrapped in `llvm.allow.ubsan.check()` guards so
// the optimizer/runtime can selectively enable or drop individual checks.
static llvm::cl::opt<bool> ClSanitizeGuardChecks(
    "ubsan-guard-checks", llvm::cl::Optional,
    llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
68
69//===--------------------------------------------------------------------===//
70// Defines for metadata
71//===--------------------------------------------------------------------===//
72
73// Those values are crucial to be the SAME as in ubsan runtime library.
75 /// An integer type.
76 TK_Integer = 0x0000,
77 /// A floating-point type.
78 TK_Float = 0x0001,
79 /// An _BitInt(N) type.
80 TK_BitInt = 0x0002,
81 /// Any other type. The value representation is unspecified.
82 TK_Unknown = 0xffff
83};
84
85//===--------------------------------------------------------------------===//
86// Miscellaneous Helper Methods
87//===--------------------------------------------------------------------===//
88
89/// CreateTempAlloca - This creates a alloca and inserts it into the entry
90/// block.
92CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
93 const Twine &Name,
94 llvm::Value *ArraySize) {
95 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
96 Alloca->setAlignment(Align.getAsAlign());
97 return RawAddress(Alloca, Ty, Align, KnownNonNull);
98}
99
100/// CreateTempAlloca - This creates a alloca and inserts it into the entry
101/// block. The alloca is casted to default address space if necessary.
103 const Twine &Name,
104 llvm::Value *ArraySize,
105 RawAddress *AllocaAddr) {
106 auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
107 if (AllocaAddr)
108 *AllocaAddr = Alloca;
109 llvm::Value *V = Alloca.getPointer();
110 // Alloca always returns a pointer in alloca address space, which may
111 // be different from the type defined by the language. For example,
112 // in C++ the auto variables are in the default address space. Therefore
113 // cast alloca to the default address space when necessary.
115 auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
116 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
117 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
118 // otherwise alloca is inserted at the current insertion point of the
119 // builder.
120 if (!ArraySize)
121 Builder.SetInsertPoint(getPostAllocaInsertPoint());
124 Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
125 }
126
127 return RawAddress(V, Ty, Align, KnownNonNull);
128}
129
130/// CreateTempAlloca - This creates an alloca and inserts it into the entry
131/// block if \p ArraySize is nullptr, otherwise inserts it at the current
132/// insertion point of the builder.
133llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
134 const Twine &Name,
135 llvm::Value *ArraySize) {
136 llvm::AllocaInst *Alloca;
137 if (ArraySize)
138 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
139 else
140 Alloca =
141 new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
142 ArraySize, Name, AllocaInsertPt->getIterator());
143 if (Allocas) {
144 Allocas->Add(Alloca);
145 }
146 return Alloca;
147}
148
149/// CreateDefaultAlignTempAlloca - This creates an alloca with the
150/// default alignment of the corresponding LLVM type, which is *not*
151/// guaranteed to be related in any way to the expected alignment of
152/// an AST type that might have been lowered to Ty.
154 const Twine &Name) {
155 CharUnits Align =
156 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
157 return CreateTempAlloca(Ty, Align, Name);
158}
159
162 return CreateTempAlloca(ConvertType(Ty), Align, Name);
163}
164
166 RawAddress *Alloca) {
167 // FIXME: Should we prefer the preferred type alignment here?
168 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
169}
170
172 const Twine &Name,
173 RawAddress *Alloca) {
175 /*ArraySize=*/nullptr, Alloca);
176
177 if (Ty->isConstantMatrixType()) {
178 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
179 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
180 ArrayTy->getNumElements());
181
182 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
184 }
185 return Result;
186}
187
189 CharUnits Align,
190 const Twine &Name) {
191 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
192}
193
195 const Twine &Name) {
196 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
197 Name);
198}
199
200/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
201/// expression and compare the result against zero, returning an Int1Ty value.
202llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
203 PGO.setCurrentStmt(E);
204 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
205 llvm::Value *MemPtr = EmitScalarExpr(E);
206 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
207 }
208
209 QualType BoolTy = getContext().BoolTy;
211 CGFPOptionsRAII FPOptsRAII(*this, E);
212 if (!E->getType()->isAnyComplexType())
213 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
214
216 Loc);
217}
218
219/// EmitIgnoredExpr - Emit code to compute the specified expression,
220/// ignoring the result.
222 if (E->isPRValue())
223 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
224
225 // if this is a bitfield-resulting conditional operator, we can special case
226 // emit this. The normal 'EmitLValue' version of this is particularly
227 // difficult to codegen for, since creating a single "LValue" for two
228 // different sized arguments here is not particularly doable.
229 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
231 if (CondOp->getObjectKind() == OK_BitField)
232 return EmitIgnoredConditionalOperator(CondOp);
233 }
234
235 // Just emit it as an l-value and drop the result.
236 EmitLValue(E);
237}
238
239/// EmitAnyExpr - Emit code to compute the specified expression which
240/// can have any type. The result is returned as an RValue struct.
241/// If this is an aggregate expression, AggSlot indicates where the
242/// result should be returned.
244 AggValueSlot aggSlot,
245 bool ignoreResult) {
246 switch (getEvaluationKind(E->getType())) {
247 case TEK_Scalar:
248 return RValue::get(EmitScalarExpr(E, ignoreResult));
249 case TEK_Complex:
250 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
251 case TEK_Aggregate:
252 if (!ignoreResult && aggSlot.isIgnored())
253 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
254 EmitAggExpr(E, aggSlot);
255 return aggSlot.asRValue();
256 }
257 llvm_unreachable("bad evaluation kind");
258}
259
260/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
261/// always be accessible even if no aggregate location is provided.
264
266 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
267 return EmitAnyExpr(E, AggSlot);
268}
269
270/// EmitAnyExprToMem - Evaluate an expression into a given memory
271/// location.
273 Address Location,
274 Qualifiers Quals,
275 bool IsInit) {
276 // FIXME: This function should take an LValue as an argument.
277 switch (getEvaluationKind(E->getType())) {
278 case TEK_Complex:
280 /*isInit*/ false);
281 return;
282
283 case TEK_Aggregate: {
284 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
289 return;
290 }
291
292 case TEK_Scalar: {
293 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
294 LValue LV = MakeAddrLValue(Location, E->getType());
296 return;
297 }
298 }
299 llvm_unreachable("bad evaluation kind");
300}
301
302static void
304 const Expr *E, Address ReferenceTemporary) {
305 // Objective-C++ ARC:
306 // If we are binding a reference to a temporary that has ownership, we
307 // need to perform retain/release operations on the temporary.
308 //
309 // FIXME: This should be looking at E, not M.
310 if (auto Lifetime = M->getType().getObjCLifetime()) {
311 switch (Lifetime) {
314 // Carry on to normal cleanup handling.
315 break;
316
318 // Nothing to do; cleaned up by an autorelease pool.
319 return;
320
323 switch (StorageDuration Duration = M->getStorageDuration()) {
324 case SD_Static:
325 // Note: we intentionally do not register a cleanup to release
326 // the object on program termination.
327 return;
328
329 case SD_Thread:
330 // FIXME: We should probably register a cleanup in this case.
331 return;
332
333 case SD_Automatic:
337 if (Lifetime == Qualifiers::OCL_Strong) {
338 const ValueDecl *VD = M->getExtendingDecl();
339 bool Precise = isa_and_nonnull<VarDecl>(VD) &&
340 VD->hasAttr<ObjCPreciseLifetimeAttr>();
344 } else {
345 // __weak objects always get EH cleanups; otherwise, exceptions
346 // could cause really nasty crashes instead of mere leaks.
349 }
350 if (Duration == SD_FullExpression)
351 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
352 M->getType(), *Destroy,
354 else
355 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
356 M->getType(),
357 *Destroy, CleanupKind & EHCleanup);
358 return;
359
360 case SD_Dynamic:
361 llvm_unreachable("temporary cannot have dynamic storage duration");
362 }
363 llvm_unreachable("unknown storage duration");
364 }
365 }
366
367 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
368 if (const RecordType *RT =
370 // Get the destructor for the reference temporary.
371 auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
372 if (!ClassDecl->hasTrivialDestructor())
373 ReferenceTemporaryDtor = ClassDecl->getDestructor();
374 }
375
376 if (!ReferenceTemporaryDtor)
377 return;
378
379 // Call the destructor for the temporary.
380 switch (M->getStorageDuration()) {
381 case SD_Static:
382 case SD_Thread: {
383 llvm::FunctionCallee CleanupFn;
384 llvm::Constant *CleanupArg;
385 if (E->getType()->isArrayType()) {
387 ReferenceTemporary, E->getType(),
389 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
390 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
391 } else {
392 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
393 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
394 CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
395 }
397 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
398 break;
399 }
400
402 CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
404 CGF.getLangOpts().Exceptions);
405 break;
406
407 case SD_Automatic:
409 ReferenceTemporary, E->getType(),
411 CGF.getLangOpts().Exceptions);
412 break;
413
414 case SD_Dynamic:
415 llvm_unreachable("temporary cannot have dynamic storage duration");
416 }
417}
418
421 const Expr *Inner,
422 RawAddress *Alloca = nullptr) {
423 auto &TCG = CGF.getTargetHooks();
424 switch (M->getStorageDuration()) {
426 case SD_Automatic: {
427 // If we have a constant temporary array or record try to promote it into a
428 // constant global under the same rules a normal constant would've been
429 // promoted. This is easier on the optimizer and generally emits fewer
430 // instructions.
431 QualType Ty = Inner->getType();
432 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
433 (Ty->isArrayType() || Ty->isRecordType()) &&
434 Ty.isConstantStorage(CGF.getContext(), true, false))
435 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
436 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
437 auto *GV = new llvm::GlobalVariable(
438 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
439 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
440 llvm::GlobalValue::NotThreadLocal,
442 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
443 GV->setAlignment(alignment.getAsAlign());
444 llvm::Constant *C = GV;
445 if (AS != LangAS::Default)
446 C = TCG.performAddrSpaceCast(
447 CGF.CGM, GV, AS, LangAS::Default,
448 GV->getValueType()->getPointerTo(
450 // FIXME: Should we put the new global into a COMDAT?
451 return RawAddress(C, GV->getValueType(), alignment);
452 }
453 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
454 }
455 case SD_Thread:
456 case SD_Static:
457 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
458
459 case SD_Dynamic:
460 llvm_unreachable("temporary can't have dynamic storage duration");
461 }
462 llvm_unreachable("unknown storage duration");
463}
464
465/// Helper method to check if the underlying ABI is AAPCS
466static bool isAAPCS(const TargetInfo &TargetInfo) {
467 return TargetInfo.getABI().starts_with("aapcs");
468}
469
472 const Expr *E = M->getSubExpr();
473
474 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
475 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
476 "Reference should never be pseudo-strong!");
477
478 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
479 // as that will cause the lifetime adjustment to be lost for ARC
480 auto ownership = M->getType().getObjCLifetime();
481 if (ownership != Qualifiers::OCL_None &&
482 ownership != Qualifiers::OCL_ExplicitNone) {
484 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
485 llvm::Type *Ty = ConvertTypeForMem(E->getType());
486 Object = Object.withElementType(Ty);
487
488 // createReferenceTemporary will promote the temporary to a global with a
489 // constant initializer if it can. It can only do this to a value of
490 // ARC-manageable type if the value is global and therefore "immune" to
491 // ref-counting operations. Therefore we have no need to emit either a
492 // dynamic initialization or a cleanup and we can just return the address
493 // of the temporary.
494 if (Var->hasInitializer())
495 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
496
497 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
498 }
499 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
501
502 switch (getEvaluationKind(E->getType())) {
503 default: llvm_unreachable("expected scalar or aggregate expression");
504 case TEK_Scalar:
505 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
506 break;
507 case TEK_Aggregate: {
514 break;
515 }
516 }
517
518 pushTemporaryCleanup(*this, M, E, Object);
519 return RefTempDst;
520 }
521
524 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
525
526 for (const auto &Ignored : CommaLHSs)
527 EmitIgnoredExpr(Ignored);
528
529 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
530 if (opaque->getType()->isRecordType()) {
531 assert(Adjustments.empty());
532 return EmitOpaqueValueLValue(opaque);
533 }
534 }
535
536 // Create and initialize the reference temporary.
537 RawAddress Alloca = Address::invalid();
538 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
539 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
540 Object.getPointer()->stripPointerCasts())) {
541 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
542 Object = Object.withElementType(TemporaryType);
543 // If the temporary is a global and has a constant initializer or is a
544 // constant temporary that we promoted to a global, we may have already
545 // initialized it.
546 if (!Var->hasInitializer()) {
547 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
548 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
549 }
550 } else {
551 switch (M->getStorageDuration()) {
552 case SD_Automatic:
553 if (auto *Size = EmitLifetimeStart(
554 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
555 Alloca.getPointer())) {
556 pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
557 Alloca, Size);
558 }
559 break;
560
561 case SD_FullExpression: {
562 if (!ShouldEmitLifetimeMarkers)
563 break;
564
565 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
566 // marker. Instead, start the lifetime of a conditional temporary earlier
567 // so that it's unconditional. Don't do this with sanitizers which need
568 // more precise lifetime marks. However when inside an "await.suspend"
569 // block, we should always avoid conditional cleanup because it creates
570 // boolean marker that lives across await_suspend, which can destroy coro
571 // frame.
572 ConditionalEvaluation *OldConditional = nullptr;
573 CGBuilderTy::InsertPoint OldIP;
575 ((!SanOpts.has(SanitizerKind::HWAddress) &&
576 !SanOpts.has(SanitizerKind::Memory) &&
577 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
578 inSuspendBlock())) {
579 OldConditional = OutermostConditional;
580 OutermostConditional = nullptr;
581
582 OldIP = Builder.saveIP();
583 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
584 Builder.restoreIP(CGBuilderTy::InsertPoint(
585 Block, llvm::BasicBlock::iterator(Block->back())));
586 }
587
588 if (auto *Size = EmitLifetimeStart(
589 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
590 Alloca.getPointer())) {
591 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
592 Size);
593 }
594
595 if (OldConditional) {
596 OutermostConditional = OldConditional;
597 Builder.restoreIP(OldIP);
598 }
599 break;
600 }
601
602 default:
603 break;
604 }
605 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
606 }
607 pushTemporaryCleanup(*this, M, E, Object);
608
609 // Perform derived-to-base casts and/or field accesses, to get from the
610 // temporary object we created (and, potentially, for which we extended
611 // the lifetime) to the subobject we're binding the reference to.
612 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
613 switch (Adjustment.Kind) {
615 Object =
616 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
617 Adjustment.DerivedToBase.BasePath->path_begin(),
618 Adjustment.DerivedToBase.BasePath->path_end(),
619 /*NullCheckValue=*/ false, E->getExprLoc());
620 break;
621
624 LV = EmitLValueForField(LV, Adjustment.Field);
625 assert(LV.isSimple() &&
626 "materialized temporary field is not a simple lvalue");
627 Object = LV.getAddress();
628 break;
629 }
630
632 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
634 Adjustment.Ptr.MPT);
635 break;
636 }
637 }
638 }
639
640 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
641}
642
643RValue
645 // Emit the expression as an lvalue.
646 LValue LV = EmitLValue(E);
647 assert(LV.isSimple());
648 llvm::Value *Value = LV.getPointer(*this);
649
651 // C++11 [dcl.ref]p5 (as amended by core issue 453):
652 // If a glvalue to which a reference is directly bound designates neither
653 // an existing object or function of an appropriate type nor a region of
654 // storage of suitable size and alignment to contain an object of the
655 // reference's type, the behavior is undefined.
656 QualType Ty = E->getType();
658 }
659
660 return RValue::get(Value);
661}
662
663
664/// getAccessedFieldNo - Given an encoded value and a result number, return the
665/// input field number being accessed.
666unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
667 const llvm::Constant *Elts) {
668 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
669 ->getZExtValue();
670}
671
672static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
673 llvm::Value *Ptr) {
674 llvm::Value *A0 =
675 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
676 llvm::Value *A1 =
677 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
678 return Builder.CreateXor(Acc, A1);
679}
680
681bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
682 return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
684}
685
686bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
688 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
689 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
692}
693
695 return SanOpts.has(SanitizerKind::Null) ||
696 SanOpts.has(SanitizerKind::Alignment) ||
697 SanOpts.has(SanitizerKind::ObjectSize) ||
698 SanOpts.has(SanitizerKind::Vptr);
699}
700
702 llvm::Value *Ptr, QualType Ty,
703 CharUnits Alignment,
704 SanitizerSet SkippedChecks,
705 llvm::Value *ArraySize) {
707 return;
708
709 // Don't check pointers outside the default address space. The null check
710 // isn't correct, the object-size check isn't supported by LLVM, and we can't
711 // communicate the addresses to the runtime handler for the vptr check.
712 if (Ptr->getType()->getPointerAddressSpace())
713 return;
714
715 // Don't check pointers to volatile data. The behavior here is implementation-
716 // defined.
717 if (Ty.isVolatileQualified())
718 return;
719
720 SanitizerScope SanScope(this);
721
723 llvm::BasicBlock *Done = nullptr;
724
725 // Quickly determine whether we have a pointer to an alloca. It's possible
726 // to skip null checks, and some alignment checks, for these pointers. This
727 // can reduce compile-time significantly.
728 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
729
730 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
731 llvm::Value *IsNonNull = nullptr;
732 bool IsGuaranteedNonNull =
733 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
734 bool AllowNullPointers = isNullPointerAllowed(TCK);
735 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
736 !IsGuaranteedNonNull) {
737 // The glvalue must not be an empty glvalue.
738 IsNonNull = Builder.CreateIsNotNull(Ptr);
739
740 // The IR builder can constant-fold the null check if the pointer points to
741 // a constant.
742 IsGuaranteedNonNull = IsNonNull == True;
743
744 // Skip the null check if the pointer is known to be non-null.
745 if (!IsGuaranteedNonNull) {
746 if (AllowNullPointers) {
747 // When performing pointer casts, it's OK if the value is null.
748 // Skip the remaining checks in that case.
749 Done = createBasicBlock("null");
750 llvm::BasicBlock *Rest = createBasicBlock("not.null");
751 Builder.CreateCondBr(IsNonNull, Rest, Done);
752 EmitBlock(Rest);
753 } else {
754 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
755 }
756 }
757 }
758
759 if (SanOpts.has(SanitizerKind::ObjectSize) &&
760 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
761 !Ty->isIncompleteType()) {
763 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
764 if (ArraySize)
765 Size = Builder.CreateMul(Size, ArraySize);
766
767 // Degenerate case: new X[0] does not need an objectsize check.
768 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
769 if (!ConstantSize || !ConstantSize->isNullValue()) {
770 // The glvalue must refer to a large enough storage region.
771 // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
772 // to check this.
773 // FIXME: Get object address space
774 llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
775 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
776 llvm::Value *Min = Builder.getFalse();
777 llvm::Value *NullIsUnknown = Builder.getFalse();
778 llvm::Value *Dynamic = Builder.getFalse();
779 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
780 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
781 Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
782 }
783 }
784
785 llvm::MaybeAlign AlignVal;
786 llvm::Value *PtrAsInt = nullptr;
787
788 if (SanOpts.has(SanitizerKind::Alignment) &&
789 !SkippedChecks.has(SanitizerKind::Alignment)) {
790 AlignVal = Alignment.getAsMaybeAlign();
791 if (!Ty->isIncompleteType() && !AlignVal)
792 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
793 /*ForPointeeType=*/true)
795
796 // The glvalue must be suitably aligned.
797 if (AlignVal && *AlignVal > llvm::Align(1) &&
798 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
799 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
800 llvm::Value *Align = Builder.CreateAnd(
801 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
802 llvm::Value *Aligned =
803 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
804 if (Aligned != True)
805 Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
806 }
807 }
808
809 if (Checks.size() > 0) {
810 llvm::Constant *StaticData[] = {
812 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
813 llvm::ConstantInt::get(Int8Ty, TCK)};
814 EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
815 PtrAsInt ? PtrAsInt : Ptr);
816 }
817
818 // If possible, check that the vptr indicates that there is a subobject of
819 // type Ty at offset zero within this object.
820 //
821 // C++11 [basic.life]p5,6:
822 // [For storage which does not refer to an object within its lifetime]
823 // The program has undefined behavior if:
824 // -- the [pointer or glvalue] is used to access a non-static data member
825 // or call a non-static member function
826 if (SanOpts.has(SanitizerKind::Vptr) &&
827 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
828 // Ensure that the pointer is non-null before loading it. If there is no
829 // compile-time guarantee, reuse the run-time null check or emit a new one.
830 if (!IsGuaranteedNonNull) {
831 if (!IsNonNull)
832 IsNonNull = Builder.CreateIsNotNull(Ptr);
833 if (!Done)
834 Done = createBasicBlock("vptr.null");
835 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
836 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
837 EmitBlock(VptrNotNull);
838 }
839
840 // Compute a deterministic hash of the mangled name of the type.
841 SmallString<64> MangledName;
842 llvm::raw_svector_ostream Out(MangledName);
844 Out);
845
846 // Contained in NoSanitizeList based on the mangled type.
847 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
848 Out.str())) {
849 // Load the vptr, and mix it with TypeHash.
850 llvm::Value *TypeHash =
851 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
852
853 llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
854 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
855 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
856 Ty->getAsCXXRecordDecl(),
858 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
859
860 llvm::Value *Hash =
861 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
862 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
863
864 // Look the hash up in our cache.
865 const int CacheSize = 128;
866 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
867 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
868 "__ubsan_vptr_type_cache");
869 llvm::Value *Slot = Builder.CreateAnd(Hash,
870 llvm::ConstantInt::get(IntPtrTy,
871 CacheSize-1));
872 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
873 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
874 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
876
877 // If the hash isn't in the cache, call a runtime handler to perform the
878 // hard work of checking whether the vptr is for an object of the right
879 // type. This will either fill in the cache and return, or produce a
880 // diagnostic.
881 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
882 llvm::Constant *StaticData[] = {
886 llvm::ConstantInt::get(Int8Ty, TCK)
887 };
888 llvm::Value *DynamicData[] = { Ptr, Hash };
889 EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
890 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
891 DynamicData);
892 }
893 }
894
895 if (Done) {
896 Builder.CreateBr(Done);
897 EmitBlock(Done);
898 }
899}
900
902 QualType EltTy) {
904 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
905 if (!EltSize)
906 return nullptr;
907
908 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
909 if (!ArrayDeclRef)
910 return nullptr;
911
912 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
913 if (!ParamDecl)
914 return nullptr;
915
916 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
917 if (!POSAttr)
918 return nullptr;
919
920 // Don't load the size if it's a lower bound.
921 int POSType = POSAttr->getType();
922 if (POSType != 0 && POSType != 1)
923 return nullptr;
924
925 // Find the implicit size parameter.
926 auto PassedSizeIt = SizeArguments.find(ParamDecl);
927 if (PassedSizeIt == SizeArguments.end())
928 return nullptr;
929
930 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
931 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
932 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
933 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
934 C.getSizeType(), E->getExprLoc());
935 llvm::Value *SizeOfElement =
936 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
937 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
938}
939
940/// If Base is known to point to the start of an array, return the length of
941/// that array. Return 0 if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          // NOTE(review): the parameter's type line appears elided in
                                          // this listing (used as a StrictFlexArraysLevelKind below) —
                                          // verify against the original file.
                                          StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    // A decayed constant/variable array gives a statically- or VLA-known
    // bound — unless it is a flexible-array-member-like access, whose real
    // length is unknowable here.
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
                                                     StrictFlexArraysLevel)) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());

      if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  // Fall back to a pass_object_size bound on the (pointer) base, if any.
  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  // No bound could be determined.
  return nullptr;
}
982
983namespace {
984
985/// \p StructAccessBase returns the base \p Expr of a field access. It returns
986/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
987///
988/// p in p-> a.b.c
989///
990/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
991/// looking for:
992///
993/// struct s {
994/// struct s *ptr;
995/// int count;
996/// char array[] __attribute__((counted_by(count)));
997/// };
998///
999/// If we have an expression like \p p->ptr->array[index], we want the
1000/// \p MemberExpr for \p p->ptr instead of \p p.
class StructAccessBase
    : public ConstStmtVisitor<StructAccessBase, const Expr *> {
  // The RecordDecl that (lexically) encloses the counted_by field we are
  // looking for; visitation stops at the first expression of this type.
  const RecordDecl *ExpectedRD;

  // Returns true if E's type (after stripping one level of pointer) is the
  // record we are searching for.
  bool IsExpectedRecordDecl(const Expr *E) const {
    QualType Ty = E->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    return ExpectedRD == Ty->getAsRecordDecl();
  }

public:
  StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  // NOTE: If we build C++ support for counted_by, then we'll have to handle
  // horrors like this:
  //
  //     struct S {
  //       int x, y;
  //       int blah[] __attribute__((counted_by(x)));
  //     } s;
  //
  //     int foo(int index, int val) {
  //       int (S::*IHatePMDs)[] = &S::blah;
  //       (s.*IHatePMDs)[index] = val;
  //     }

  const Expr *Visit(const Expr *E) {
  // NOTE(review): the dispatch body (presumably forwarding to the
  // ConstStmtVisitor base) appears elided in this listing — verify upstream.
  }

  // Anything not explicitly handled below is not a usable base.
  const Expr *VisitStmt(const Stmt *S) { return nullptr; }

  // These are the types we expect to return (in order of most to least
  // likely):
  //
  //   1. DeclRefExpr - This is the expression for the base of the structure.
  //      It's exactly what we want to build an access to the \p counted_by
  //      field.
  //   2. MemberExpr - This is the expression that has the same \p RecordDecl
  //      as the flexble array member's lexical enclosing \p RecordDecl. This
  //      allows us to catch things like: "p->p->array"
  //   3. CompoundLiteralExpr - This is for people who create something
  //      heretical like (struct foo has a flexible array member):
  //
  //        (struct foo){ 1, 2 }.blah[idx];
  const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitMemberExpr(const MemberExpr *E) {
    // Prefer the outermost arrow access of the expected record type;
    // otherwise keep descending through the base.
    if (IsExpectedRecordDecl(E) && E->isArrow())
      return E;
    const Expr *Res = Visit(E->getBase());
    return !Res && IsExpectedRecordDecl(E) ? E : Res;
  }
  const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitCallExpr(const CallExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (IsExpectedRecordDecl(E))
      return E;
    return Visit(E->getBase());
  }
  // Transparent wrappers: look through them.
  const Expr *VisitCastExpr(const CastExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
};
1085
1086} // end anonymous namespace
1087
1089
// NOTE(review): the signature's first line (returning bool, taking
// CodeGenFunction & and the RecordDecl to search) appears elided in this
// listing — verify against the original file.
                                 const FieldDecl *Field,
                                 RecIndicesTy &Indices) {
  // Recursively builds the GEP index path from RD down to Field, appending
  // indices innermost-first (the caller reverses them). Returns true when
  // Field was found under RD.
  const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
  int64_t FieldNo = -1;
  for (const FieldDecl *FD : RD->fields()) {
    if (!Layout.containsFieldDecl(FD))
      // This could happen if the field has a struct type that's empty. I don't
      // know why either.
      continue;

    FieldNo = Layout.getLLVMFieldNo(FD);
    if (FD == Field) {
      Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
      return true;
    }

    // Descend into nested record members.
    QualType Ty = FD->getType();
    if (Ty->isRecordType()) {
      if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
        // Union members all live at LLVM field index 0.
        if (RD->isUnion())
          FieldNo = 0;
        Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
        return true;
      }
    }
  }

  return false;
}
1120
1121/// This method is typically called in contexts where we can't generate
1122/// side-effects, like in __builtin_dynamic_object_size. When finding
1123/// expressions, only choose those that have either already been emitted or can
1124/// be loaded without side-effects.
1125///
1126/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1127/// within the top-level struct.
1128/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
// NOTE(review): the function-name line of this definition appears elided in
// this listing — verify against the original file.
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();

  // Find the base struct expr (i.e. p in p->a.b.c.d).
  const Expr *StructBase = StructAccessBase(RD).Visit(Base);
  // Bail out for side-effecting bases — this runs in contexts (e.g.
  // __builtin_dynamic_object_size) where side effects must not be emitted.
  if (!StructBase || StructBase->HasSideEffects(getContext()))
    return nullptr;

  // Materialize a raw pointer to the base struct, depending on what kind of
  // expression the visitor returned.
  llvm::Value *Res = nullptr;
  if (const auto *DRE = dyn_cast<DeclRefExpr>(StructBase)) {
    Res = EmitDeclRefLValue(DRE).getPointer(*this);
    Res = Builder.CreateAlignedLoad(ConvertType(DRE->getType()), Res,
                                    getPointerAlign(), "dre.load");
  } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
    LValue LV = EmitMemberExpr(ME);
    Address Addr = LV.getAddress();
    Res = Addr.emitRawPointer(*this);
  } else if (StructBase->getType()->isPointerType()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
    Res = Addr.emitRawPointer(*this);
  } else {
    return nullptr;
  }

  // Compute the GEP path to the count field and load it.
  RecIndicesTy Indices;
  getGEPIndicesToField(*this, RD, CountDecl, Indices);
  if (Indices.empty())
    return nullptr;

  Indices.push_back(Builder.getInt32(0));
  // NOTE(review): the GEP-creating call's first line appears elided in this
  // listing (these are its arguments) — verify against the original file.
      ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
      RecIndicesTy(llvm::reverse(Indices)), "..counted_by.gep");

  return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
                                   getIntAlign(), "..counted_by.load");
}
1169
1170void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1171 llvm::Value *Index, QualType IndexType,
1172 bool Accessed) {
1173 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1174 "should not be called unless adding bounds checks");
1175 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1176 getLangOpts().getStrictFlexArraysLevel();
1177 QualType IndexedType;
1178 llvm::Value *Bound =
1179 getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1180
1181 EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1182}
1183
void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
                                          llvm::Value *Index,
                                          QualType IndexType,
                                          QualType IndexedType, bool Accessed) {
  // No derivable bound means no check to emit.
  if (!Bound)
    return;

  SanitizerScope SanScope(this);

  // Widen both operands to size_t; the index keeps its signedness so a
  // negative index becomes a huge unsigned value and fails the check.
  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
      // NOTE(review): an initializer line (typically the check's source
      // location descriptor) appears elided in this listing — verify upstream.
      EmitCheckTypeDescriptor(IndexedType),
      EmitCheckTypeDescriptor(IndexType)
  };
  // An accessed element must be strictly in bounds; a merely-formed address
  // may be one-past-the-end, hence ULE.
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}
1207
// NOTE(review): the signature's opening line(s) and the load of the current
// complex value into InVal appear elided in this listing — verify upstream.
                                  bool isInc, bool isPre) {

  // Build the incremented/decremented value; only the real part changes.
  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  // Imaginary part is carried through unchanged.
  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    // NOTE(review): the called function's first line appears elided in this
    // listing (this is its trailing argument) — verify upstream.
                                E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}
1243
// NOTE(review): the signature's opening line appears elided in this listing —
// verify against the original file.
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    // NOTE(review): the guarded statement appears elided in this listing.

  // Let debug info record the explicitly-cast-to type.
  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}
1253
1254//===----------------------------------------------------------------------===//
1255// LValue Expression Emission
1256//===----------------------------------------------------------------------===//
1257
// Worker for CodeGenFunction::EmitPointerWithAlignment: peels casts,
// unary &, and addressof-style builtins off a pointer-typed expression to
// derive the tightest alignment (and base/TBAA info) for the pointee.
// NOTE(review): the signature's opening line appears elided in this listing —
// verify against the original file.
                                        TBAAAccessInfo *TBAAInfo,
                                        KnownNonNull_t IsKnownNonNull,
                                        CodeGenFunction &CGF) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
  // NOTE(review): the assert's continuation line appears elided here.
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        // Recurse into the operand to pick up its alignment/base/TBAA info.
        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        // NOTE(review): the recursive call's first line appears elided here.
            CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          // NOTE(review): the alignment-computing call's first line appears
          // elided here.
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo =
                CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr.setAlignment(Align);
          }
        }

        // CFI check for casts between unrelated pointer types.
        if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
                                          /*MayBeNull=*/true,
                                          // NOTE(review): one argument line
                                          // appears elided here.
                                          CE->getBeginLoc());
        }

        llvm::Type *ElemTy =
        // NOTE(review): the initializer line appears elided here.
        Addr = Addr.withElementType(ElemTy);
        if (CE->getCastKind() == CK_AddressSpaceConversion)
          Addr = CGF.Builder.CreateAddrSpaceCast(
              Addr, CGF.ConvertType(E->getType()), ElemTy);
        return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
                                            CE->getType());
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
      // NOTE(review): the recursive pointer-emission call's first line appears
      // elided here.
          CE->getSubExpr(), BaseInfo, nullptr,
          (KnownNonNull_t)(IsKnownNonNull ||
                           CE->getCastKind() == CK_UncheckedDerivedToBase));
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return CGF.GetAddressOfBaseClass(
          Addr, Derived, CE->path_begin(), CE->path_end(),
          CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
  }

  // std::addressof and variants.
  if (auto *Call = dyn_cast<CallExpr>(E)) {
    switch (Call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  // NOTE(review): the returned call's opening line(s) appear elided here.
      /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
}
1387
1388/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1389/// derive a more accurate bound on the alignment of the pointer.
// NOTE(review): the signature's opening line appears elided in this listing —
// verify against the original file.
    const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
    KnownNonNull_t IsKnownNonNull) {
  // Delegate to the file-local worker, then propagate the caller's non-null
  // knowledge onto the resulting Address.
  Address Addr =
      ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
  if (IsKnownNonNull && !Addr.isKnownNonNull())
    Addr.setKnownNonNull();
  return Addr;
}
1399
  // NOTE(review): this function's signature line appears elided in this
  // listing; the body compares a scalar r-value against null, using the C++
  // ABI's member-pointer comparison when T is a member pointer type.
  llvm::Value *V = RV.getScalarVal();
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}
1406
  // NOTE(review): this function's signature line appears elided in this
  // listing; the body produces an undef RValue appropriate for Ty's
  // evaluation kind.
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
    // NOTE(review): the initializer line appears elided here.
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}
1432
// Report an unsupported r-value expression and recover with an undef value.
// NOTE(review): the signature's opening line appears elided in this listing.
                                            const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}
1438
// Report an unsupported l-value expression and recover with an l-value at an
// undef address so codegen can continue.
// NOTE(review): the signature's opening line appears elided in this listing.
                                            const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *ElTy = ConvertType(E->getType());
  llvm::Type *Ty = UnqualPtrTy;
  return MakeAddrLValue(
      Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}
1447
1448bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1449 const Expr *Base = Obj;
1450 while (!isa<CXXThisExpr>(Base)) {
1451 // The result of a dynamic_cast can be null.
1452 if (isa<CXXDynamicCastExpr>(Base))
1453 return false;
1454
1455 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1456 Base = CE->getSubExpr();
1457 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1458 Base = PE->getSubExpr();
1459 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1460 if (UO->getOpcode() == UO_Extension)
1461 Base = UO->getSubExpr();
1462 else
1463 return false;
1464 } else {
1465 return false;
1466 }
1467 }
1468 return true;
1469}
1470
/// Emit an l-value for \p E and, for simple non-bitfield results, a sanitizer
/// type check of kind \p TCK on it. Array subscripts fold their bounds check
/// into the l-value emission when the array-bounds sanitizer is enabled.
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
    // Member accesses whose base is (a wrapped) 'this' or a plain variable
    // reference can skip checks the base already guarantees.
    SanitizerSet SkippedChecks;
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
      if (IsBaseCXXThis)
        SkippedChecks.set(SanitizerKind::Alignment, true);
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
        SkippedChecks.set(SanitizerKind::Null, true);
    }
    EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
  }
  return LV;
}
1490
1491/// EmitLValue - Emit code to compute a designator that specifies the location
1492/// of the expression.
1493///
1494/// This can return one of two things: a simple address or a bitfield reference.
1495/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1496/// an LLVM pointer type.
1497///
1498/// If this returns a bitfield reference, nothing about the pointee type of the
1499/// LLVM value is known: For example, it may not be a pointer to an integer.
1500///
1501/// If this returns a normal address, and if the lvalue's C type is fixed size,
1502/// this method guarantees that the returned pointer type will point to an LLVM
1503/// type of the same size of the lvalue's type. If the lvalue has a variable
1504/// length type, this is not possible.
1505///
// NOTE(review): the signature's opening line appears elided in this listing.
                                KnownNonNull_t IsKnownNonNull) {
  LValue LV = EmitLValueHelper(E, IsKnownNonNull);
  // Propagate the caller's non-null knowledge onto the resulting l-value.
  if (IsKnownNonNull && !LV.isKnownNonNull())
    LV.setKnownNonNull();
  return LV;
}
1513
// Determine the pointee type referred to by a constant expression's
// subexpression (either an OpaqueValueExpr's own type or the call's return
// pointee type).
// NOTE(review): the signature's opening line appears elided in this listing.
                                       const ASTContext &Ctx) {
  const Expr *SE = E->getSubExpr()->IgnoreImplicit();
  if (isa<OpaqueValueExpr>(SE))
    return SE->getType();
  return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
}
1521
/// Central dispatch for l-value emission: switch on the statement class and
/// forward to the dedicated Emit*LValue routine for each expression kind.
LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
                                         KnownNonNull_t IsKnownNonNull) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    // Complex compound assignments get a dedicated path; atomics are handled
    // on their underlying value type.
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
                      IsKnownNonNull);
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      // NOTE(review): the line computing RetType appears elided in this
      // listing — verify against the original file.
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
                      IsKnownNonNull);
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      Address Addr = LV.getAddress();
      llvm::Value *V = Addr.getBasePointer();
      Scope.ForceCleanup({&V});
      Addr.replaceBasePointer(V);
      return LValue::MakeAddr(Addr, LV.getType(), getContext(),
                              LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::ArraySectionExprClass:
    return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::CXXThisExprClass:
    // NOTE(review): the return statement for this case appears elided in this
    // listing — as shown it would fall through; verify against upstream.
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
                      IsKnownNonNull);
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  case Expr::PackIndexingExprClass:
    return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
  }
}
1677
1678/// Given an object of the given canonical type, can we safely copy a
1679/// value out of it based on its initializer?
  // NOTE(review): this function's signature line appears elided in this
  // listing; per the comment above, it answers whether a value of the given
  // canonical type can safely be copied out of its initializer.
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}
1697
1698/// Can we constant-emit a load of a reference to a variable of the
1699/// given type? This is different from predicates like
1700/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1701/// in situations that don't necessarily satisfy the language's rules
1702/// for this (e.g. C++'s ODR-use rules). For example, we want to able
1703/// to do this with const float variables even if those variables
1704/// aren't marked 'constexpr'.
  // NOTE(review): this function's signature, the enum it returns, and two
  // lines of its body appear elided in this listing — verify upstream.
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    // A reference is constant-emittable as a reference, and also as a value
    // when its pointee is itself constant-emittable.
    if (isConstantEmittableObjectType(ref->getPointeeType()))
    return CEK_AsReferenceOnly;
  }
    return CEK_AsValueOnly;
  return CEK_None;
}
1722
1723/// Try to emit a reference to the given value without producing it as
1724/// an l-value. This is just an optimization, but it avoids us needing
1725/// to emit global copies of variables if they're named without triggering
1726/// a formal use in a context where we can't emit a direct reference to them,
1727/// for instance if a block or lambda or a member of a local class uses a
1728/// const int variable or constexpr variable from an enclosing function.
CodeGenFunction::ConstantEmission
// NOTE(review): the function-name line and the declaration of CEK appear
// elided in this listing — verify against the original file.
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

    // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

    // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as load from the host
  // global variable as compile time constant, since the host variable is not
  // accessible on device. The DRE of the captured reference variable has to be
  // loaded from captures.
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
  // NOTE(review): the condition's continuation line appears elided here.
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (MD && MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
                                               result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
  // NOTE(review): the two return statements forming the tail of this function
  // appear elided in this listing — verify against the original file.

}
1814
// If the member expression names a static data member, rewrite it as a plain
// DeclRefExpr so later constant emission can handle it.
// NOTE(review): the signature's opening line and one Create() argument line
// appear elided in this listing — verify against the original file.
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}
1826
// MemberExpr overload: delegates to the DeclRefExpr overload when the member
// can be rewritten as a DRE (see tryToConvertMemberExprToDeclRefExpr).
CodeGenFunction::ConstantEmission
// NOTE(review): the function-name and guarding-if lines appear elided in this
// listing — verify against the original file.
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}
1833
// Turn a successful ConstantEmission into a scalar llvm::Value, loading
// through the reference l-value when the emission was by reference.
// NOTE(review): the signature's opening line appears elided in this listing.
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}
1843
// Convenience overload: unpack the LValue's address, qualifiers, base info,
// and TBAA info and forward to the main EmitLoadOfScalar.
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
// NOTE(review): the SourceLocation parameter line appears elided in this
// listing — verify against the original file.
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}
1850
  // NOTE(review): this function's signature line appears elided in this
  // listing; the body answers whether Ty is represented as a boolean —
  // directly, via an enum with a boolean underlying type, or via an atomic
  // wrapping such a type.
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}
1863
// Compute the valid value range [Min, End) for loads of Ty: [0,2) for
// booleans, and the enumerator-derived range for non-fixed C++ enums when
// strict enums are requested. Returns false when no range applies.
// NOTE(review): the signature's opening line appears elided in this listing.
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums, bool IsBool) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    ED->getValueRange(End, Min);
  }
  return true;
}
1882
/// Build !range metadata describing the set of valid in-memory values for a
/// load of type \p Ty. Per getRangeForType above, this constrains bools
/// (range [0,2)) and, under -fstrict-enums in C++, enums without a fixed
/// underlying type (their declared value range).
/// \returns null when the type's loaded values are unconstrained.
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  // NOTE(review): the extraction of this listing dropped the trailing
  // argument of this call (presumably the IsBool operand) — confirm
  // against upstream CGExpr.cpp before compiling.
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
    return nullptr;

  // createRange takes a half-open [Min, End) interval.
  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}
1892
1895 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1896 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1897 if (!HasBoolCheck && !HasEnumCheck)
1898 return false;
1899
1900 bool IsBool = hasBooleanRepresentation(Ty) ||
1902 bool NeedsBoolCheck = HasBoolCheck && IsBool;
1903 bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1904 if (!NeedsBoolCheck && !NeedsEnumCheck)
1905 return false;
1906
1907 // Single-bit booleans don't need to be checked. Special-case this to avoid
1908 // a bit width mismatch when handling bitfield values. This is handled by
1909 // EmitFromMemory for the non-bitfield case.
1910 if (IsBool &&
1911 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1912 return false;
1913
1914 llvm::APInt Min, End;
1915 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1916 return true;
1917
1918 auto &Ctx = getLLVMContext();
1919 SanitizerScope SanScope(this);
1920 llvm::Value *Check;
1921 --End;
1922 if (!Min) {
1923 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1924 } else {
1925 llvm::Value *Upper =
1926 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1927 llvm::Value *Lower =
1928 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1929 Check = Builder.CreateAnd(Upper, Lower);
1930 }
1931 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1934 NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
1935 EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1936 StaticArgs, EmitCheckValue(Value));
1937 return true;
1938}
1939
/// Load a scalar of type \p Ty from \p Addr, handling TLS indirection,
/// boolean ext-vector storage, vec3-as-vec4 widening, atomics, nontemporal
/// hints, TBAA decoration, and sanitizer/range metadata.
/// NOTE(review): this listing was extracted with several physical lines
/// dropped (the 'SourceLocation Loc' parameter and two continuation lines
/// marked below); confirm against upstream CGExpr.cpp before compiling.
llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty,
                                               LValueBaseInfo BaseInfo,
                                               TBAAAccessInfo TBAAInfo,
                                               bool isNontemporal) {
  // Thread-local globals must be accessed through their TLS wrapper address.
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
      // NOTE(review): continuation line (second withPointer argument)
      // dropped in extraction.

  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (ClangVecTy->isExtVectorBoolType()) {
      llvm::Type *ValTy = ConvertType(Ty);
      unsigned ValNumElems =
          cast<llvm::FixedVectorType>(ValTy)->getNumElements();
      // Load the `iP` storage object (P is the padded vector size).
      auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
      const auto *RawIntTy = RawIntV->getType();
      assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
      // Bitcast iP --> <P x i1>.
      auto *PaddedVecTy = llvm::FixedVectorType::get(
          Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
      llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
      // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
      V = emitBoolVecConversion(V, ValNumElems, "extractvec");

      return EmitFromMemory(V, Ty);
    }

    // Handle vectors of size 3 like size 4 for better performance.
    const llvm::Type *EltTy = Addr.getElementType();
    const auto *VTy = cast<llvm::FixedVectorType>(EltTy);

    if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {

      // Load as a vec4 and shuffle the first three lanes back out.
      llvm::VectorType *vec4Ty =
          llvm::FixedVectorType::get(VTy->getElementType(), 4);
      Address Cast = Addr.withElementType(vec4Ty);
      // Now load value.
      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
  }

  Addr =
  // NOTE(review): continuation line (right-hand side of this assignment)
  // dropped in extraction.

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    // Mark the load !nontemporal so the backend can bypass the cache.
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
    // In order to prevent the optimizer from throwing away the check, don't
    // attach range metadata to the load.
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
      Load->setMetadata(llvm::LLVMContext::MD_noundef,
                        llvm::MDNode::get(getLLVMContext(), std::nullopt));
    }

  // Convert from the widened storage form back to the primary IR type.
  return EmitFromMemory(Load, Ty);
}
2020
/// Converts a scalar value from its primary IR type (as returned
/// by ConvertType) to its load/store type (as returned by
/// convertTypeForLoadStore).
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Booleans and _BitInt values are widened to their in-memory width.
  if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
    // NOTE(review): the extraction of this listing dropped the line
    // declaring 'Signed' (presumably derived from Ty's signedness) —
    // confirm against upstream CGExpr.cpp before compiling.
    return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
  }

  // Boolean ext-vectors are stored as a single padded integer iP.
  if (Ty->isExtVectorBoolType()) {
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
    // Expand to the memory bit width.
    unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
    // <N x i1> --> <P x i1>.
    Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
    // <P x i1> --> iP.
    Value = Builder.CreateBitCast(Value, StoreTy);
  }

  return Value;
}
2043
2044/// Converts a scalar value from its load/store type (as returned
2045/// by convertTypeForLoadStore) to its primary IR type (as returned
2046/// by ConvertType).
2047llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2048 if (Ty->isExtVectorBoolType()) {
2049 const auto *RawIntTy = Value->getType();
2050 // Bitcast iP --> <P x i1>.
2051 auto *PaddedVecTy = llvm::FixedVectorType::get(
2052 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2053 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2054 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2055 llvm::Type *ValTy = ConvertType(Ty);
2056 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2057 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2058 }
2059
2060 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2061 llvm::Type *ResTy = ConvertType(Ty);
2062 return Builder.CreateTrunc(Value, ResTy, "loadedv");
2063 }
2064
2065 return Value;
2066}
2067
2068// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to an array (the memory type of MatrixType).
2071 CodeGenFunction &CGF,
2072 bool IsVector = true) {
2073 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2074 if (ArrayTy && IsVector) {
2075 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2076 ArrayTy->getNumElements());
2077
2078 return Addr.withElementType(VectorTy);
2079 }
2080 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2081 if (VectorTy && !IsVector) {
2082 auto *ArrayTy = llvm::ArrayType::get(
2083 VectorTy->getElementType(),
2084 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2085
2086 return Addr.withElementType(ArrayTy);
2087 }
2088
2089 return Addr;
2090}
2091
2092// Emit a store of a matrix LValue. This may require casting the original
2093// pointer to memory address (ArrayType) to a pointer to the value type
2094// (VectorType).
2095static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2096 bool isInit, CodeGenFunction &CGF) {
2097 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2098 value->getType()->isVectorTy());
2099 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2100 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2101 lvalue.isNontemporal());
2102}
2103
2104void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2105 bool Volatile, QualType Ty,
2106 LValueBaseInfo BaseInfo,
2107 TBAAAccessInfo TBAAInfo,
2108 bool isInit, bool isNontemporal) {
2109 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2110 if (GV->isThreadLocal())
2111 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2113
2114 llvm::Type *SrcTy = Value->getType();
2115 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2116 auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
2117 if (!CGM.getCodeGenOpts().PreserveVec3Type) {
2118 // Handle vec3 special.
2119 if (VecTy && !ClangVecTy->isExtVectorBoolType() &&
2120 cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
2121 // Our source is a vec3, do a shuffle vector to make it a vec4.
2122 Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
2123 "extractVec");
2124 SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
2125 }
2126 if (Addr.getElementType() != SrcTy) {
2127 Addr = Addr.withElementType(SrcTy);
2128 }
2129 }
2130 }
2131
2132 Value = EmitToMemory(Value, Ty);
2133
2134 LValue AtomicLValue =
2135 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2136 if (Ty->isAtomicType() ||
2137 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2138 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2139 return;
2140 }
2141
2142 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2143 if (isNontemporal) {
2144 llvm::MDNode *Node =
2145 llvm::MDNode::get(Store->getContext(),
2146 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2147 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2148 }
2149
2150 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2151}
2152
2153void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2154 bool isInit) {
2155 if (lvalue.getType()->isConstantMatrixType()) {
2156 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2157 return;
2158 }
2159
2160 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2161 lvalue.getType(), lvalue.getBaseInfo(),
2162 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2163}
2164
2165// Emit a load of a LValue of matrix type. This may require casting the pointer
2166// to memory address (ArrayType) to a pointer to the value type (VectorType).
2168 CodeGenFunction &CGF) {
2169 assert(LV.getType()->isConstantMatrixType());
2171 LV.setAddress(Addr);
2172 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2173}
2174
2177 QualType Ty = LV.getType();
2178 switch (getEvaluationKind(Ty)) {
2179 case TEK_Scalar:
2180 return EmitLoadOfLValue(LV, Loc);
2181 case TEK_Complex:
2183 case TEK_Aggregate:
2184 EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2185 return Slot.asRValue();
2186 }
2187 llvm_unreachable("bad evaluation kind");
2188}
2189
2190/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2191/// method emits the address of the lvalue, then loads the result as an rvalue,
2192/// returning the rvalue.
2194 if (LV.isObjCWeak()) {
2195 // load of a __weak object.
2196 Address AddrWeakObj = LV.getAddress();
2198 AddrWeakObj));
2199 }
2201 // In MRC mode, we do a load+autorelease.
2202 if (!getLangOpts().ObjCAutoRefCount) {
2204 }
2205
2206 // In ARC mode, we load retained and then consume the value.
2207 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2208 Object = EmitObjCConsumeObject(LV.getType(), Object);
2209 return RValue::get(Object);
2210 }
2211
2212 if (LV.isSimple()) {
2213 assert(!LV.getType()->isFunctionType());
2214
2215 if (LV.getType()->isConstantMatrixType())
2216 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2217
2218 // Everything needs a load.
2219 return RValue::get(EmitLoadOfScalar(LV, Loc));
2220 }
2221
2222 if (LV.isVectorElt()) {
2223 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2224 LV.isVolatileQualified());
2225 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2226 "vecext"));
2227 }
2228
2229 // If this is a reference to a subset of the elements of a vector, either
2230 // shuffle the input or extract/insert them as appropriate.
2231 if (LV.isExtVectorElt()) {
2233 }
2234
2235 // Global Register variables always invoke intrinsics
2236 if (LV.isGlobalReg())
2237 return EmitLoadOfGlobalRegLValue(LV);
2238
2239 if (LV.isMatrixElt()) {
2240 llvm::Value *Idx = LV.getMatrixIdx();
2241 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2242 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2243 llvm::MatrixBuilder MB(Builder);
2244 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2245 }
2246 llvm::LoadInst *Load =
2248 return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2249 }
2250
2251 assert(LV.isBitField() && "Unknown LValue type!");
2252 return EmitLoadOfBitfieldLValue(LV, Loc);
2253}
2254
2257 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2258
2259 // Get the output type.
2260 llvm::Type *ResLTy = ConvertType(LV.getType());
2261
2262 Address Ptr = LV.getBitFieldAddress();
2263 llvm::Value *Val =
2264 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2265
2266 bool UseVolatile = LV.isVolatileQualified() &&
2267 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2268 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2269 const unsigned StorageSize =
2270 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2271 if (Info.IsSigned) {
2272 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2273 unsigned HighBits = StorageSize - Offset - Info.Size;
2274 if (HighBits)
2275 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2276 if (Offset + HighBits)
2277 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2278 } else {
2279 if (Offset)
2280 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2281 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2282 Val = Builder.CreateAnd(
2283 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2284 }
2285 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2286 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2287 return RValue::get(Val);
2288}
2289
2290// If this is a reference to a subset of the elements of a vector, create an
2291// appropriate shufflevector.
2293 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2294 LV.isVolatileQualified());
2295
2296 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2297 // IR value to a vector here allows the rest of codegen to behave as normal.
2298 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2299 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2300 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2301 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2302 }
2303
2304 const llvm::Constant *Elts = LV.getExtVectorElts();
2305
2306 // If the result of the expression is a non-vector type, we must be extracting
2307 // a single element. Just codegen as an extractelement.
2308 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2309 if (!ExprVT) {
2310 unsigned InIdx = getAccessedFieldNo(0, Elts);
2311 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2312 return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2313 }
2314
2315 // Always use shuffle vector to try to retain the original program structure
2316 unsigned NumResultElts = ExprVT->getNumElements();
2317
2319 for (unsigned i = 0; i != NumResultElts; ++i)
2320 Mask.push_back(getAccessedFieldNo(i, Elts));
2321
2322 Vec = Builder.CreateShuffleVector(Vec, Mask);
2323 return RValue::get(Vec);
2324}
2325
2326/// Generates lvalue for partial ext_vector access.
2328 Address VectorAddress = LV.getExtVectorAddress();
2329 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2330 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2331
2332 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2333
2334 const llvm::Constant *Elts = LV.getExtVectorElts();
2335 unsigned ix = getAccessedFieldNo(0, Elts);
2336
2337 Address VectorBasePtrPlusIx =
2338 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2339 "vector.elt");
2340
2341 return VectorBasePtrPlusIx;
2342}
2343
/// Loads of global named registers are always calls to intrinsics.
2346 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2347 "Bad type for register variable");
2348 llvm::MDNode *RegName = cast<llvm::MDNode>(
2349 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2350
2351 // We accept integer and pointer types only
2352 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2353 llvm::Type *Ty = OrigTy;
2354 if (OrigTy->isPointerTy())
2355 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2356 llvm::Type *Types[] = { Ty };
2357
2358 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2359 llvm::Value *Call = Builder.CreateCall(
2360 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2361 if (OrigTy->isPointerTy())
2362 Call = Builder.CreateIntToPtr(Call, OrigTy);
2363 return RValue::get(Call);
2364}
2365
2366/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2367/// lvalue, where both are guaranteed to the have the same type, and that type
2368/// is 'Ty'.
2370 bool isInit) {
2371 if (!Dst.isSimple()) {
2372 if (Dst.isVectorElt()) {
2373 // Read/modify/write the vector, inserting the new element.
2374 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2375 Dst.isVolatileQualified());
2376 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2377 if (IRStoreTy) {
2378 auto *IRVecTy = llvm::FixedVectorType::get(
2379 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2380 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2381 // iN --> <N x i1>.
2382 }
2383 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2384 Dst.getVectorIdx(), "vecins");
2385 if (IRStoreTy) {
2386 // <N x i1> --> <iN>.
2387 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2388 }
2390 Dst.isVolatileQualified());
2391 return;
2392 }
2393
2394 // If this is an update of extended vector elements, insert them as
2395 // appropriate.
2396 if (Dst.isExtVectorElt())
2398
2399 if (Dst.isGlobalReg())
2400 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2401
2402 if (Dst.isMatrixElt()) {
2403 llvm::Value *Idx = Dst.getMatrixIdx();
2404 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2405 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2406 llvm::MatrixBuilder MB(Builder);
2407 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2408 }
2409 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2410 llvm::Value *Vec =
2411 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2413 Dst.isVolatileQualified());
2414 return;
2415 }
2416
2417 assert(Dst.isBitField() && "Unknown LValue type");
2418 return EmitStoreThroughBitfieldLValue(Src, Dst);
2419 }
2420
2421 // There's special magic for assigning into an ARC-qualified l-value.
2422 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2423 switch (Lifetime) {
2425 llvm_unreachable("present but none");
2426
2428 // nothing special
2429 break;
2430
2432 if (isInit) {
2433 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2434 break;
2435 }
2436 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2437 return;
2438
2440 if (isInit)
2441 // Initialize and then skip the primitive store.
2443 else
2445 /*ignore*/ true);
2446 return;
2447
2450 Src.getScalarVal()));
2451 // fall into the normal path
2452 break;
2453 }
2454 }
2455
2456 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2457 // load of a __weak object.
2458 Address LvalueDst = Dst.getAddress();
2459 llvm::Value *src = Src.getScalarVal();
2460 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2461 return;
2462 }
2463
2464 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2465 // load of a __strong object.
2466 Address LvalueDst = Dst.getAddress();
2467 llvm::Value *src = Src.getScalarVal();
2468 if (Dst.isObjCIvar()) {
2469 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2470 llvm::Type *ResultType = IntPtrTy;
2472 llvm::Value *RHS = dst.emitRawPointer(*this);
2473 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2474 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2475 ResultType, "sub.ptr.lhs.cast");
2476 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2477 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2478 } else if (Dst.isGlobalObjCRef()) {
2479 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2480 Dst.isThreadLocalRef());
2481 }
2482 else
2483 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2484 return;
2485 }
2486
2487 assert(Src.isScalar() && "Can't emit an agg store with this method");
2488 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2489}
2490
2492 llvm::Value **Result) {
2493 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2494 llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
2495 Address Ptr = Dst.getBitFieldAddress();
2496
2497 // Get the source value, truncated to the width of the bit-field.
2498 llvm::Value *SrcVal = Src.getScalarVal();
2499
2500 // Cast the source to the storage type and shift it into place.
2501 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2502 /*isSigned=*/false);
2503 llvm::Value *MaskedVal = SrcVal;
2504
2505 const bool UseVolatile =
2506 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2507 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2508 const unsigned StorageSize =
2509 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2510 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2511 // See if there are other bits in the bitfield's storage we'll need to load
2512 // and mask together with source before storing.
2513 if (StorageSize != Info.Size) {
2514 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2515 llvm::Value *Val =
2516 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2517
2518 // Mask the source value as needed.
2520 SrcVal = Builder.CreateAnd(
2521 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2522 "bf.value");
2523 MaskedVal = SrcVal;
2524 if (Offset)
2525 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2526
2527 // Mask out the original value.
2528 Val = Builder.CreateAnd(
2529 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2530 "bf.clear");
2531
2532 // Or together the unchanged values and the source value.
2533 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2534 } else {
2535 assert(Offset == 0);
2536 // According to the AACPS:
2537 // When a volatile bit-field is written, and its container does not overlap
2538 // with any non-bit-field member, its container must be read exactly once
2539 // and written exactly once using the access width appropriate to the type
2540 // of the container. The two accesses are not atomic.
2541 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2542 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2543 Builder.CreateLoad(Ptr, true, "bf.load");
2544 }
2545
2546 // Write the new value back out.
2547 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2548
2549 // Return the new value of the bit-field, if requested.
2550 if (Result) {
2551 llvm::Value *ResultVal = MaskedVal;
2552
2553 // Sign extend the value if needed.
2554 if (Info.IsSigned) {
2555 assert(Info.Size <= StorageSize);
2556 unsigned HighBits = StorageSize - Info.Size;
2557 if (HighBits) {
2558 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2559 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2560 }
2561 }
2562
2563 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2564 "bf.result.cast");
2565 *Result = EmitFromMemory(ResultVal, Dst.getType());
2566 }
2567}
2568
2570 LValue Dst) {
2571 // HLSL allows storing to scalar values through ExtVector component LValues.
2572 // To support this we need to handle the case where the destination address is
2573 // a scalar.
2574 Address DstAddr = Dst.getExtVectorAddress();
2575 if (!DstAddr.getElementType()->isVectorTy()) {
2576 assert(!Dst.getType()->isVectorType() &&
2577 "this should only occur for non-vector l-values");
2578 Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
2579 return;
2580 }
2581
2582 // This access turns into a read/modify/write of the vector. Load the input
2583 // value now.
2584 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2585 const llvm::Constant *Elts = Dst.getExtVectorElts();
2586
2587 llvm::Value *SrcVal = Src.getScalarVal();
2588
2589 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2590 unsigned NumSrcElts = VTy->getNumElements();
2591 unsigned NumDstElts =
2592 cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2593 if (NumDstElts == NumSrcElts) {
2594 // Use shuffle vector is the src and destination are the same number of
2595 // elements and restore the vector mask since it is on the side it will be
2596 // stored.
2597 SmallVector<int, 4> Mask(NumDstElts);
2598 for (unsigned i = 0; i != NumSrcElts; ++i)
2599 Mask[getAccessedFieldNo(i, Elts)] = i;
2600
2601 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2602 } else if (NumDstElts > NumSrcElts) {
2603 // Extended the source vector to the same length and then shuffle it
2604 // into the destination.
2605 // FIXME: since we're shuffling with undef, can we just use the indices
2606 // into that? This could be simpler.
2607 SmallVector<int, 4> ExtMask;
2608 for (unsigned i = 0; i != NumSrcElts; ++i)
2609 ExtMask.push_back(i);
2610 ExtMask.resize(NumDstElts, -1);
2611 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2612 // build identity
2614 for (unsigned i = 0; i != NumDstElts; ++i)
2615 Mask.push_back(i);
2616
2617 // When the vector size is odd and .odd or .hi is used, the last element
2618 // of the Elts constant array will be one past the size of the vector.
2619 // Ignore the last element here, if it is greater than the mask size.
2620 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2621 NumSrcElts--;
2622
2623 // modify when what gets shuffled in
2624 for (unsigned i = 0; i != NumSrcElts; ++i)
2625 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2626 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2627 } else {
2628 // We should never shorten the vector
2629 llvm_unreachable("unexpected shorten vector length");
2630 }
2631 } else {
2632 // If the Src is a scalar (not a vector), and the target is a vector it must
2633 // be updating one element.
2634 unsigned InIdx = getAccessedFieldNo(0, Elts);
2635 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2636 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2637 }
2638
2640 Dst.isVolatileQualified());
2641}
2642
2643/// Store of global named registers are always calls to intrinsics.
2645 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2646 "Bad type for register variable");
2647 llvm::MDNode *RegName = cast<llvm::MDNode>(
2648 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2649 assert(RegName && "Register LValue is not metadata");
2650
2651 // We accept integer and pointer types only
2652 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2653 llvm::Type *Ty = OrigTy;
2654 if (OrigTy->isPointerTy())
2655 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2656 llvm::Type *Types[] = { Ty };
2657
2658 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2659 llvm::Value *Value = Src.getScalarVal();
2660 if (OrigTy->isPointerTy())
2661 Value = Builder.CreatePtrToInt(Value, Ty);
2662 Builder.CreateCall(
2663 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2664}
2665
2666// setObjCGCLValueClass - sets class of the lvalue for the purpose of
2667// generating write-barries API. It is currently a global, ivar,
2668// or neither.
2669static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2670 LValue &LV,
2671 bool IsMemberAccess=false) {
2672 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2673 return;
2674
2675 if (isa<ObjCIvarRefExpr>(E)) {
2676 QualType ExpTy = E->getType();
2677 if (IsMemberAccess && ExpTy->isPointerType()) {
2678 // If ivar is a structure pointer, assigning to field of
2679 // this struct follows gcc's behavior and makes it a non-ivar
2680 // writer-barrier conservatively.
2681 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2682 if (ExpTy->isRecordType()) {
2683 LV.setObjCIvar(false);
2684 return;
2685 }
2686 }
2687 LV.setObjCIvar(true);
2688 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2689 LV.setBaseIvarExp(Exp->getBase());
2691 return;
2692 }
2693
2694 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2695 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2696 if (VD->hasGlobalStorage()) {
2697 LV.setGlobalObjCRef(true);
2698 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2699 }
2700 }
2702 return;
2703 }
2704
2705 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2706 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2707 return;
2708 }
2709
2710 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2711 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2712 if (LV.isObjCIvar()) {
2713 // If cast is to a structure pointer, follow gcc's behavior and make it
2714 // a non-ivar write-barrier.
2715 QualType ExpTy = E->getType();
2716 if (ExpTy->isPointerType())
2717 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2718 if (ExpTy->isRecordType())
2719 LV.setObjCIvar(false);
2720 }
2721 return;
2722 }
2723
2724 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2725 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2726 return;
2727 }
2728
2729 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2730 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2731 return;
2732 }
2733
2734 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2735 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2736 return;
2737 }
2738
2739 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2740 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2741 return;
2742 }
2743
2744 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2745 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2746 if (LV.isObjCIvar() && !LV.isObjCArray())
2747 // Using array syntax to assigning to what an ivar points to is not
2748 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2749 LV.setObjCIvar(false);
2750 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2751 // Using array syntax to assigning to what global points to is not
2752 // same as assigning to the global itself. {id *G;} G[i] = 0;
2753 LV.setGlobalObjCRef(false);
2754 return;
2755 }
2756
2757 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2758 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2759 // We don't know if member is an 'ivar', but this flag is looked at
2760 // only in the context of LV.isObjCIvar().
2762 return;
2763 }
2764}
2765
// Emits an lvalue for the per-thread copy of an OpenMP 'threadprivate'
// variable: obtains the thread-local address from the OpenMP runtime (or the
// OMPIRBuilder path), retypes it to the variable's IR type, and wraps it as a
// Decl-aligned lvalue.
// NOTE(review): the doc-extraction dropped the function signature (source line
// 2766) and the OMPBuilder address line (2770); confirm against upstream
// clang/lib/CodeGen/CGExpr.cpp before building.
2767 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2768 llvm::Type *RealVarTy, SourceLocation Loc) {
2769 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2771 CGF, VD, Addr, Loc);
2772 else
2773 Addr =
2774 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2775
// Retype to the variable's memory type; the runtime hands back an opaque slot.
2776 Addr = Addr.withElementType(RealVarTy);
2777 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2778}
2779
// For device codegen, returns the address of an OpenMP 'declare target'
// variable, or an invalid Address when the variable must not be referenced
// directly (MT_To / MT_Enter without unified shared memory).
// NOTE(review): the extraction dropped the signature (source line 2780) and
// parts of the condition/assert and the Address computation (lines 2789, 2794,
// 2797); recover from upstream CGExpr.cpp.
2781 const VarDecl *VD, QualType T) {
2782 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2783 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2784 // Return an invalid address if variable is MT_To (or MT_Enter starting with
2785 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2786 // and MT_To (or MT_Enter) with unified memory, return a valid address.
2787 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2788 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2790 return Address::invalid();
2791 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2792 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2793 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2795 "Expected link clause OR to clause with unified memory enabled.");
// The device-side symbol holds a pointer to the actual storage; load through it.
2796 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2798 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2799}
2800
// Loads the pointer stored in a reference lvalue and turns it into a natural
// Address for the pointee type (deriving pointee alignment/base-info/TBAA).
// NOTE(review): the extraction dropped the signature line (source line 2802,
// CodeGenFunction::EmitLoadOfReference(LValue RefLVal, ...)) and line 2807
// (presumably the TBAA decoration of the load); verify against upstream.
2801Address
2803 LValueBaseInfo *PointeeBaseInfo,
2804 TBAAAccessInfo *PointeeTBAAInfo) {
2805 llvm::LoadInst *Load =
2806 Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
2808 return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
2809 CharUnits(), /*ForPointeeType=*/true,
2810 PointeeBaseInfo, PointeeTBAAInfo);
2811}
2812
// Convenience wrapper: loads through a reference lvalue and repackages the
// pointee address (with its base-info and TBAA info) as a new lvalue.
// NOTE(review): the signature line (source line 2813) was dropped by the
// doc-extraction; confirm against upstream CGExpr.cpp.
2814 LValueBaseInfo PointeeBaseInfo;
2815 TBAAAccessInfo PointeeTBAAInfo;
2816 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2817 &PointeeTBAAInfo);
2818 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2819 PointeeBaseInfo, PointeeTBAAInfo);
2820}
2821
// Loads a pointer value from memory and wraps it as a natural Address for the
// pointee type, optionally reporting the derived base-info and TBAA info.
// NOTE(review): the signature line (source line 2822) was dropped by the
// doc-extraction; confirm against upstream CGExpr.cpp.
2823 const PointerType *PtrTy,
2824 LValueBaseInfo *BaseInfo,
2825 TBAAAccessInfo *TBAAInfo) {
2826 llvm::Value *Addr = Builder.CreateLoad(Ptr);
2827 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
2828 CharUnits(), /*ForPointeeType=*/true,
2829 BaseInfo, TBAAInfo);
2830}
2831
// Convenience wrapper: loads a pointer and returns an lvalue for its pointee,
// propagating the base-info and TBAA info computed by EmitLoadOfPointer.
// NOTE(review): the signature line (source line 2832) was dropped by the
// doc-extraction; confirm against upstream CGExpr.cpp.
2833 const PointerType *PtrTy) {
2834 LValueBaseInfo BaseInfo;
2835 TBAAAccessInfo TBAAInfo;
2836 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2837 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2838}
2839
// Emits an lvalue for a DeclRefExpr naming a global variable, handling dynamic
// TLS wrappers, OpenMP declare-target/link, OpenMP threadprivate, and
// reference-typed globals.
// NOTE(review): the extraction dropped the signature (source line 2840), part
// of the TLS-wrapper condition (2846), and the non-reference branch of the LV
// ternary (2873-2874); recover from upstream CGExpr.cpp.
2841 const Expr *E, const VarDecl *VD) {
2842 QualType T = E->getType();
2843
2844 // If it's thread_local, emit a call to its wrapper function instead.
2845 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2847 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2848 // Check if the variable is marked as declare target with link clause in
2849 // device codegen.
2850 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2851 Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2852 if (Addr.isValid())
2853 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2854 }
2855
2856 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2857
// Static TLS: materialize the per-thread address explicitly.
2858 if (VD->getTLSKind() != VarDecl::TLS_None)
2859 V = CGF.Builder.CreateThreadLocalAddress(V);
2860
2861 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2862 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2863 Address Addr(V, RealVarTy, Alignment);
2864 // Emit reference to the private copy of the variable if it is an OpenMP
2865 // threadprivate variable.
2866 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2867 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2868 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2869 E->getExprLoc());
2870 }
// Reference-typed globals are loaded through; others use the address directly.
2871 LValue LV = VD->getType()->isReferenceType() ?
2872 CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2875 setObjCGCLValueClass(CGF.getContext(), E, LV);
2876 return LV;
2877}
2878
// Returns a constant pointer to the function named by GD, resolving weakref
// aliases to their aliasee instead of the alias symbol itself.
// NOTE(review): the extraction dropped the signature (source line 2879) and
// the aliasee lookup (2883); recover from upstream CGExpr.cpp.
2880 llvm::Type *Ty) {
2881 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2882 if (FD->hasAttr<WeakRefAttr>()) {
2884 return aliasee.getPointer();
2885 }
2886
2887 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
2888 return V;
2889}
2890
// Builds an lvalue referring to a function declaration (its address constant,
// with the declaration's alignment).
// NOTE(review): the extraction dropped the signature (source line 2891) and
// the trailing MakeAddrLValue argument line (2897); recover from upstream.
2892 GlobalDecl GD) {
2893 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2894 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
2895 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2896 return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2898}
2899
// Thin forwarding helper: emits an lvalue for a lambda-captured field via the
// enclosing CodeGenFunction.
// NOTE(review): the signature line (source line 2900) was dropped by the
// doc-extraction; confirm against upstream CGExpr.cpp.
2901 llvm::Value *ThisValue) {
2902
2903 return CGF.EmitLValueForLambdaField(FD, ThisValue);
2904}
2905
2906/// Named Registers are named metadata pointing to the register name
2907/// which will be read from/written to as an argument to the intrinsic
2908/// @llvm.read/write_register.
2909/// So far, only the name is being passed down, but other options such as
2910/// register type, allocation type or even optimization options could be
2911/// passed down via the metadata node.
// NOTE(review): the function signature (source line 2912) was dropped by the
// doc-extraction; confirm against upstream CGExpr.cpp.
2913 SmallString<64> Name("llvm.named.register.");
2914 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
// The 64-byte SmallString must hold prefix + register label without heap spill.
2915 assert(Asm->getLabel().size() < 64-Name.size() &&
2916 "Register name too big");
2917 Name.append(Asm->getLabel());
2918 llvm::NamedMDNode *M =
2919 CGM.getModule().getOrInsertNamedMetadata(Name);
// First reference to this register: record its label as the node's operand.
2920 if (M->getNumOperands() == 0) {
2921 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2922 Asm->getLabel());
2923 llvm::Metadata *Ops[] = {Str};
2924 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2925 }
2926
2927 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2928
2929 llvm::Value *Ptr =
2930 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2931 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2932}
2933
2934/// Determine whether we can emit a reference to \p VD from the current
2935/// context, despite not necessarily having seen an odr-use of the variable in
2936/// this context.
// NOTE(review): the function signature (source line 2937) was dropped by the
// doc-extraction; confirm against upstream CGExpr.cpp.
2938 const DeclRefExpr *E,
2939 const VarDecl *VD) {
2940 // For a variable declared in an enclosing scope, do not emit a spurious
2941 // reference even if we have a capture, as that will emit an unwarranted
2942 // reference to our capture state, and will likely generate worse code than
2943 // emitting a local copy.
2944 if (E->refersToEnclosingVariableOrCapture())
2945 return false;
2946
2947 // For a local declaration declared in this function, we can always reference
2948 // it even if we don't have an odr-use.
2949 if (VD->hasLocalStorage()) {
2950 return VD->getDeclContext() ==
2951 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2952 }
2953
2954 // For a global declaration, we can emit a reference to it if we know
2955 // for sure that we are able to emit a definition of it.
2956 VD = VD->getDefinition(CGF.getContext());
2957 if (!VD)
2958 return false;
2959
2960 // Don't emit a spurious reference if it might be to a variable that only
2961 // exists on a different device / target.
2962 // FIXME: This is unnecessarily broad. Check whether this would actually be a
2963 // cross-target reference.
2964 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
2965 CGF.getLangOpts().OpenCL) {
2966 return false;
2967 }
2968
2969 // We can emit a spurious reference only if the linkage implies that we'll
2970 // be emitting a non-interposable symbol that will be retained until link
2971 // time.
2972 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
2973 case llvm::GlobalValue::ExternalLinkage:
2974 case llvm::GlobalValue::LinkOnceODRLinkage:
2975 case llvm::GlobalValue::WeakODRLinkage:
2976 case llvm::GlobalValue::InternalLinkage:
2977 case llvm::GlobalValue::PrivateLinkage:
2978 return true;
2979 default:
2980 return false;
2981 }
2982}
2983
// Emits an lvalue for a DeclRefExpr. Dispatches on the referenced declaration:
// named-register globals, non-odr-use constants, captured variables, weakref
// aliases, global and local VarDecls (TLS / OpenMP threadprivate / block-byref
// / reference types), functions, structured bindings, MSGuidDecls, and
// template parameter objects.
// NOTE(review): the doc-extraction dropped the signature (source line 2984)
// and a number of interior lines (2985's siblings at 3021, 3043, 3049, 3054-
// 3055, 3061, 3066, 3086, 3106, 3119, 3124, 3138, 3146, 3153-3154, 3175-3176,
// 3185); recover from upstream clang/lib/CodeGen/CGExpr.cpp before building.
2985 const NamedDecl *ND = E->getDecl();
2986 QualType T = E->getType();
2987
2988 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
2989 "should not emit an unevaluated operand");
2990
2991 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2992 // Global Named registers access via intrinsics only
2993 if (VD->getStorageClass() == SC_Register &&
2994 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
2995 return EmitGlobalNamedRegister(VD, CGM);
2996
2997 // If this DeclRefExpr does not constitute an odr-use of the variable,
2998 // we're not permitted to emit a reference to it in general, and it might
2999 // not be captured if capture would be necessary for a use. Emit the
3000 // constant value directly instead.
3001 if (E->isNonOdrUse() == NOUR_Constant &&
3002 (VD->getType()->isReferenceType() ||
3003 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3004 VD->getAnyInitializer(VD);
3005 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3006 E->getLocation(), *VD->evaluateValue(), VD->getType());
3007 assert(Val && "failed to emit constant expression");
3008
3009 Address Addr = Address::invalid();
3010 if (!VD->getType()->isReferenceType()) {
3011 // Spill the constant value to a global.
3012 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3013 getContext().getDeclAlign(VD));
3014 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3015 auto *PTy = llvm::PointerType::get(
3016 VarTy, getTypes().getTargetAddressSpace(VD->getType()));
3017 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3018 } else {
3019 // Should we be using the alignment of the constant pointer we emitted?
3020 CharUnits Alignment =
3022 /* BaseInfo= */ nullptr,
3023 /* TBAAInfo= */ nullptr,
3024 /* forPointeeType= */ true);
3025 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3026 }
3027 return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3028 }
3029
3030 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3031
3032 // Check for captured variables.
3033 if (E->refersToEnclosingVariableOrCapture()) {
3034 VD = VD->getCanonicalDecl();
3035 if (auto *FD = LambdaCaptureFields.lookup(VD))
3036 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3037 if (CapturedStmtInfo) {
3038 auto I = LocalDeclMap.find(VD);
3039 if (I != LocalDeclMap.end()) {
3040 LValue CapLVal;
3041 if (VD->getType()->isReferenceType())
3042 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3044 else
3045 CapLVal = MakeAddrLValue(I->second, T);
3046 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3047 // in simd context.
3048 if (getLangOpts().OpenMP &&
3050 CapLVal.setNontemporal(/*Value=*/true);
3051 return CapLVal;
3052 }
3053 LValue CapLVal =
3056 Address LValueAddress = CapLVal.getAddress();
3057 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3058 LValueAddress.getElementType(),
3059 getContext().getDeclAlign(VD)),
3060 CapLVal.getType(),
3062 CapLVal.getTBAAInfo());
3063 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3064 // in simd context.
3065 if (getLangOpts().OpenMP &&
3067 CapLVal.setNontemporal(/*Value=*/true);
3068 return CapLVal;
3069 }
3070
3071 assert(isa<BlockDecl>(CurCodeDecl));
3072 Address addr = GetAddrOfBlockDecl(VD);
3073 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3074 }
3075 }
3076
3077 // FIXME: We should be able to assert this for FunctionDecls as well!
3078 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3079 // those with a valid source location.
3080 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3081 !E->getLocation().isValid()) &&
3082 "Should not use decl without marking it used!");
3083
3084 if (ND->hasAttr<WeakRefAttr>()) {
3085 const auto *VD = cast<ValueDecl>(ND);
3087 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3088 }
3089
3090 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3091 // Check if this is a global variable.
3092 if (VD->hasLinkage() || VD->isStaticDataMember())
3093 return EmitGlobalVarDeclLValue(*this, E, VD);
3094
3095 Address addr = Address::invalid();
3096
3097 // The variable should generally be present in the local decl map.
3098 auto iter = LocalDeclMap.find(VD);
3099 if (iter != LocalDeclMap.end()) {
3100 addr = iter->second;
3101
3102 // Otherwise, it might be static local we haven't emitted yet for
3103 // some reason; most likely, because it's in an outer function.
3104 } else if (VD->isStaticLocal()) {
3105 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3107 addr = Address(
3108 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3109
3110 // No other cases for now.
3111 } else {
3112 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3113 }
3114
3115 // Handle threadlocal function locals.
3116 if (VD->getTLSKind() != VarDecl::TLS_None)
3117 addr = addr.withPointer(
3118 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3120
3121 // Check for OpenMP threadprivate variables.
3122 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3123 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3125 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3126 E->getExprLoc());
3127 }
3128
3129 // Drill into block byref variables.
3130 bool isBlockByref = VD->isEscapingByref();
3131 if (isBlockByref) {
3132 addr = emitBlockByrefAddress(addr, VD);
3133 }
3134
3135 // Drill into reference types.
3136 LValue LV = VD->getType()->isReferenceType() ?
3137 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3139
3140 bool isLocalStorage = VD->hasLocalStorage();
3141
3142 bool NonGCable = isLocalStorage &&
3143 !VD->getType()->isReferenceType() &&
3144 !isBlockByref;
3145 if (NonGCable) {
3147 LV.setNonGC(true);
3148 }
3149
3150 bool isImpreciseLifetime =
3151 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3152 if (isImpreciseLifetime)
3155 return LV;
3156 }
3157
3158 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3159 return EmitFunctionDeclLValue(*this, E, FD);
3160
3161 // FIXME: While we're emitting a binding from an enclosing scope, all other
3162 // DeclRefExprs we see should be implicitly treated as if they also refer to
3163 // an enclosing scope.
3164 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3165 if (E->refersToEnclosingVariableOrCapture()) {
3166 auto *FD = LambdaCaptureFields.lookup(BD);
3167 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3168 }
3169 return EmitLValue(BD->getBinding());
3170 }
3171
3172 // We can form DeclRefExprs naming GUID declarations when reconstituting
3173 // non-type template parameters into expressions.
3174 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3177
3178 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3179 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3180 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3181
// If the object's address space differs from the expression's type, insert an
// address-space cast so the lvalue is well-typed for the use site.
3182 if (AS != T.getAddressSpace()) {
3183 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3184 auto PtrTy = ATPO.getElementType()->getPointerTo(TargetAS);
3186 CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
3187 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3188 }
3189
3190 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3191 }
3192
3193 llvm_unreachable("Unhandled DeclRefExpr");
3194}
3195
// Emits an lvalue for a unary operator: __extension__ (transparent), UO_Deref
// (pointer dereference with ObjC GC handling), UO_Real/UO_Imag (component of a
// complex lvalue), and pre-increment/decrement (which return the updated
// lvalue).
// NOTE(review): the doc-extraction dropped the signature (source line 3196)
// and interior lines 3213, 3222, 3242, and 3245; recover from upstream
// clang/lib/CodeGen/CGExpr.cpp.
3197 // __extension__ doesn't affect lvalue-ness.
3198 if (E->getOpcode() == UO_Extension)
3199 return EmitLValue(E->getSubExpr());
3200
3201 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3202 switch (E->getOpcode()) {
3203 default: llvm_unreachable("Unknown unary operator lvalue!");
3204 case UO_Deref: {
3205 QualType T = E->getSubExpr()->getType()->getPointeeType();
3206 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3207
3208 LValueBaseInfo BaseInfo;
3209 TBAAAccessInfo TBAAInfo;
3210 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3211 &TBAAInfo);
3212 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3214
3215 // We should not generate __weak write barrier on indirect reference
3216 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3217 // But, we continue to generate __strong write barrier on indirect write
3218 // into a pointer to object.
3219 if (getLangOpts().ObjC &&
3220 getLangOpts().getGC() != LangOptions::NonGC &&
3221 LV.isObjCWeak())
3223 return LV;
3224 }
3225 case UO_Real:
3226 case UO_Imag: {
3227 LValue LV = EmitLValue(E->getSubExpr());
3228 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3229
3230 // __real is valid on scalars. This is a faster way of testing that.
3231 // __imag can only produce an rvalue on scalars.
3232 if (E->getOpcode() == UO_Real &&
3233 !LV.getAddress().getElementType()->isStructTy()) {
3234 assert(E->getSubExpr()->getType()->isArithmeticType());
3235 return LV;
3236 }
3237
3238 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3239
3240 Address Component =
3241 (E->getOpcode() == UO_Real
3243 : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
3244 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3246 ElemLV.getQuals().addQualifiers(LV.getQuals());
3247 return ElemLV;
3248 }
3249 case UO_PreInc:
3250 case UO_PreDec: {
3251 LValue LV = EmitLValue(E->getSubExpr());
3252 bool isInc = E->getOpcode() == UO_PreInc;
3253
3254 if (E->getType()->isAnyComplexType())
3255 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3256 else
3257 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3258 return LV;
3259 }
3260 }
3261}
3262
3266}
3267
3271}
3272
// Emits an lvalue for a PredefinedExpr (__func__, __FUNCTION__, ...): builds a
// constant string global named "<ident-kind>.<function-name>". Inside a block,
// the block's name (plus a discriminator when the same name repeats) is used.
// NOTE(review): the doc-extraction dropped the signature (source line 3273)
// and interior lines 3286, 3290, 3294, and 3298 (discriminator computation and
// the return statements of each branch); recover from upstream CGExpr.cpp.
3274 auto SL = E->getFunctionName();
3275 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3276 StringRef FnName = CurFn->getName();
// Strip the '\01' literal-name marker mangled names may carry.
3277 if (FnName.starts_with("\01"))
3278 FnName = FnName.substr(1);
3279 StringRef NameItems[] = {
3280 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3281 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3282 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3283 std::string Name = std::string(SL->getString());
3284 if (!Name.empty()) {
3285 unsigned Discriminator =
3287 if (Discriminator)
3288 Name += "_" + Twine(Discriminator + 1).str();
3289 auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3291 } else {
3292 auto C =
3293 CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3295 }
3296 }
3297 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3299}
3300
3301/// Emit a type description suitable for use by a runtime sanitizer library. The
3302/// format of a type descriptor is
3303///
3304/// \code
3305/// { i16 TypeKind, i16 TypeInfo }
3306/// \endcode
3307///
3308/// followed by an array of i8 containing the type name with extra information
3309/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3310/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3311/// anything else.
// NOTE(review): the doc-extraction dropped the signature (source line 3312)
// and interior lines 3342, 3348, 3376, and 3379 (float TypeInfo, the
// getDiagnosticFormat call, the GV name/map registration); recover upstream.
3313 // Only emit each type's descriptor once.
3314 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3315 return C;
3316
3317 uint16_t TypeKind = TK_Unknown;
3318 uint16_t TypeInfo = 0;
3319 bool IsBitInt = false;
3320
3321 if (T->isIntegerType()) {
3322 TypeKind = TK_Integer;
// TypeInfo packs log2(bit-width) in the upper bits with signedness in bit 0.
3323 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3324 (T->isSignedIntegerType() ? 1 : 0);
3325 // Follow suggestion from discussion of issue 64100.
3326 // So we can write the exact amount of bits in TypeName after '\0'
3327 // making it <diagnostic-like type name>.'\0'.<32-bit width>.
3328 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3329 // Do a sanity checks as we are using 32-bit type to store bit length.
3330 assert(getContext().getTypeSize(T) > 0 &&
3331 " non positive amount of bits in __BitInt type");
3332 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3333 " too many bits in __BitInt type");
3334
3335 // Redefine TypeKind with the actual __BitInt type if we have signed
3336 // BitInt.
3337 TypeKind = TK_BitInt;
3338 IsBitInt = true;
3339 }
3340 } else if (T->isFloatingType()) {
3341 TypeKind = TK_Float;
3343 }
3344
3345 // Format the type name as if for a diagnostic, including quotes and
3346 // optionally an 'aka'.
3347 SmallString<32> Buffer;
3349 DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(),
3350 StringRef(), std::nullopt, Buffer, std::nullopt);
3351
3352 if (IsBitInt) {
3353 // The Structure is: 0 to end the string, 32 bit unsigned integer in target
3354 // endianness, zero.
3355 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3356 const auto *EIT = T->castAs<BitIntType>();
3357 uint32_t Bits = EIT->getNumBits();
3358 llvm::support::endian::write32(S + 1, Bits,
3359 getTarget().isBigEndian()
3360 ? llvm::endianness::big
3361 : llvm::endianness::little);
3362 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3363 Buffer.append(Str);
3364 }
3365
3366 llvm::Constant *Components[] = {
3367 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3368 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3369 };
3370 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3371
3372 auto *GV = new llvm::GlobalVariable(
3373 CGM.getModule(), Descriptor->getType(),
3374 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3375 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3377
3378 // Remember the descriptor for this type.
3380
3381 return GV;
3382}
3383
3384llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3385 llvm::Type *TargetTy = IntPtrTy;
3386
3387 if (V->getType() == TargetTy)
3388 return V;
3389
3390 // Floating-point types which fit into intptr_t are bitcast to integers
3391 // and then passed directly (after zero-extension, if necessary).
3392 if (V->getType()->isFloatingPointTy()) {
3393 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3394 if (Bits <= TargetTy->getIntegerBitWidth())
3395 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3396 Bits));
3397 }
3398
3399 // Integers which fit in intptr_t are zero-extended and passed directly.
3400 if (V->getType()->isIntegerTy() &&
3401 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3402 return Builder.CreateZExt(V, TargetTy);
3403
3404 // Pointers are passed directly, everything else is passed by address.
3405 if (!V->getType()->isPointerTy()) {
3406 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3407 Builder.CreateStore(V, Ptr);
3408 V = Ptr.getPointer();
3409 }
3410 return Builder.CreatePtrToInt(V, TargetTy);
3411}
3412
3413/// Emit a representation of a SourceLocation for passing to a handler
3414/// in a sanitizer runtime library. The format for this data is:
3415/// \code
3416/// struct SourceLocation {
3417/// const char *Filename;
3418/// int32_t Line, Column;
3419/// };
3420/// \endcode
3421/// For an invalid SourceLocation, the Filename pointer is null.
// NOTE(review): the doc-extraction dropped the signature (source line 3422),
// the PresumedLoc computation (3426), and part of the filename-GV reporting
// call (3456); recover from upstream CGExpr.cpp.
3423 llvm::Constant *Filename;
3424 int Line, Column;
3425
3427 if (PLoc.isValid()) {
3428 StringRef FilenameString = PLoc.getFilename();
3429
// -fsanitize-undefined-strip-path-components: negative keeps the last N
// components, positive strips the first N.
3430 int PathComponentsToStrip =
3431 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3432 if (PathComponentsToStrip < 0) {
3433 assert(PathComponentsToStrip != INT_MIN);
3434 int PathComponentsToKeep = -PathComponentsToStrip;
3435 auto I = llvm::sys::path::rbegin(FilenameString);
3436 auto E = llvm::sys::path::rend(FilenameString);
3437 while (I != E && --PathComponentsToKeep)
3438 ++I;
3439
3440 FilenameString = FilenameString.substr(I - E);
3441 } else if (PathComponentsToStrip > 0) {
3442 auto I = llvm::sys::path::begin(FilenameString);
3443 auto E = llvm::sys::path::end(FilenameString);
3444 while (I != E && PathComponentsToStrip--)
3445 ++I;
3446
3447 if (I != E)
3448 FilenameString =
3449 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3450 else
3451 FilenameString = llvm::sys::path::filename(FilenameString);
3452 }
3453
3454 auto FilenameGV =
3455 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3457 cast<llvm::GlobalVariable>(
3458 FilenameGV.getPointer()->stripPointerCasts()));
3459 Filename = FilenameGV.getPointer();
3460 Line = PLoc.getLine();
3461 Column = PLoc.getColumn();
3462 } else {
3463 Filename = llvm::Constant::getNullValue(Int8PtrTy);
3464 Line = Column = 0;
3465 }
3466
3467 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3468 Builder.getInt32(Column)};
3469
3470 return llvm::ConstantStruct::getAnon(Data);
3471}
3472
3473namespace {
3474/// Specify under what conditions this check can be recovered
3475enum class CheckRecoverableKind {
3476 /// Always terminate program execution if this check fails.
// NOTE(review): the 'Unrecoverable' enumerator (source line 3477) was dropped
// by the doc-extraction; getRecoverableKind() below still references it.
3478 /// Check supports recovering, runtime has both fatal (noreturn) and
3479 /// non-fatal handlers for this check.
3480 Recoverable,
3481 /// Runtime conditionally aborts, always need to support recovery.
// NOTE(review): the 'AlwaysRecoverable' enumerator (source line 3482) was also
// dropped; it too is referenced by getRecoverableKind().
3483};
3484}
3485
3486static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
3487 assert(Kind.countPopulation() == 1);
3488 if (Kind == SanitizerKind::Vptr)
3489 return CheckRecoverableKind::AlwaysRecoverable;
3490 else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
3491 return CheckRecoverableKind::Unrecoverable;
3492 else
3493 return CheckRecoverableKind::Recoverable;
3494}
3495
3496namespace {
// Metadata for one UBSan runtime handler: the handler's base name (used to
// build the "__ubsan_handle_<Name>" symbol) and its ABI version suffix
// (0 means no "_vN" suffix). One entry per SANITIZER_CHECK, see the
// SanitizerHandlers table below.
3497struct SanitizerHandlerInfo {
3498 char const *const Name;
3499 unsigned Version;
3500};
3501}
3502
// Table of {name, version} pairs, indexed by SanitizerHandler enumerator.
// NOTE(review): the macro-expansion line between the #define and #undef
// (source line 3505, presumably LIST_SANITIZER_CHECKS) was dropped by the
// doc-extraction; without it the array is empty. Recover from upstream.
3503const SanitizerHandlerInfo SanitizerHandlers[] = {
3504#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3506#undef SANITIZER_CHECK
3507};
3508
// Emits the call to a UBSan runtime handler ("__ubsan_handle_<name>[...]"),
// assembling the symbol name from the handler table plus optional "_vN",
// "_minimal" and "_abort" suffixes, and either branching to ContBB afterwards
// (recoverable) or terminating the block with unreachable (fatal).
// NOTE(review): the doc-extraction dropped the signature (source line 3509)
// and the FnArgs parameter line (3511); recover from upstream CGExpr.cpp.
3510 llvm::FunctionType *FnType,
3512 SanitizerHandler CheckHandler,
3513 CheckRecoverableKind RecoverKind, bool IsFatal,
3514 llvm::BasicBlock *ContBB) {
3515 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3516 std::optional<ApplyDebugLocation> DL;
3517 if (!CGF.Builder.getCurrentDebugLocation()) {
3518 // Ensure that the call has at least an artificial debug location.
3519 DL.emplace(CGF, SourceLocation());
3520 }
// Unrecoverable checks have no separate "_abort" flavor — the plain handler
// already never returns.
3521 bool NeedsAbortSuffix =
3522 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3523 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3524 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3525 const StringRef CheckName = CheckInfo.Name;
3526 std::string FnName = "__ubsan_handle_" + CheckName.str();
3527 if (CheckInfo.Version && !MinimalRuntime)
3528 FnName += "_v" + llvm::utostr(CheckInfo.Version);
3529 if (MinimalRuntime)
3530 FnName += "_minimal";
3531 if (NeedsAbortSuffix)
3532 FnName += "_abort";
3533 bool MayReturn =
3534 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3535
3536 llvm::AttrBuilder B(CGF.getLLVMContext());
3537 if (!MayReturn) {
3538 B.addAttribute(llvm::Attribute::NoReturn)
3539 .addAttribute(llvm::Attribute::NoUnwind);
3540 }
3541 B.addUWTableAttr(llvm::UWTableKind::Default);
3542
3543 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3544 FnType, FnName,
3545 llvm::AttributeList::get(CGF.getLLVMContext(),
3546 llvm::AttributeList::FunctionIndex, B),
3547 /*Local=*/true);
3548 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3549 if (!MayReturn) {
3550 HandlerCall->setDoesNotReturn();
3551 CGF.Builder.CreateUnreachable();
3552 } else {
3553 CGF.Builder.CreateBr(ContBB);
3554 }
3555}
3556
// Emits a UBSan check: partitions the per-sanitizer "check passed" conditions
// into trap / recoverable / fatal groups (per -fsanitize-trap and
// -fsanitize-recover), emits a trap for the trap group, then branches to a
// cold handler block that calls the runtime handler(s) with the static data
// block and coerced dynamic operands.
// NOTE(review): the doc-extraction dropped the signature (source line 3557),
// the allow_ubsan_check guard opener (3582), the Args/ArgTypes SmallVector
// declarations (3627-3628), and the static-data report line (3642); recover
// from upstream CGExpr.cpp.
3558 ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
3559 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3560 ArrayRef<llvm::Value *> DynamicArgs) {
3561 assert(IsSanitizerScope);
3562 assert(Checked.size() > 0);
3563 assert(CheckHandler >= 0 &&
3564 size_t(CheckHandler) < std::size(SanitizerHandlers));
3565 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3566
// Conditions below are "check passed" predicates; a group is triggered when
// its accumulated condition is false.
3567 llvm::Value *FatalCond = nullptr;
3568 llvm::Value *RecoverableCond = nullptr;
3569 llvm::Value *TrapCond = nullptr;
3570 for (int i = 0, n = Checked.size(); i < n; ++i) {
3571 llvm::Value *Check = Checked[i].first;
3572 // -fsanitize-trap= overrides -fsanitize-recover=.
3573 llvm::Value *&Cond =
3574 CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
3575 ? TrapCond
3576 : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
3577 ? RecoverableCond
3578 : FatalCond;
3579 Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3580 }
3581
// llvm.allow_ubsan_check lets the optimizer selectively disable hot checks;
// OR-ing with !Allow force-passes every group when the check is disallowed.
3583 llvm::Value *Allow =
3584 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3585 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
3586
3587 for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
3588 if (*Cond)
3589 *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
3590 }
3591 }
3592
3593 if (TrapCond)
3594 EmitTrapCheck(TrapCond, CheckHandler);
3595 if (!FatalCond && !RecoverableCond)
3596 return;
3597
3598 llvm::Value *JointCond;
3599 if (FatalCond && RecoverableCond)
3600 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3601 else
3602 JointCond = FatalCond ? FatalCond : RecoverableCond;
3603 assert(JointCond);
3604
3605 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3606 assert(SanOpts.has(Checked[0].second));
3607#ifndef NDEBUG
3608 for (int i = 1, n = Checked.size(); i < n; ++i) {
3609 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3610 "All recoverable kinds in a single check must be same!");
3611 assert(SanOpts.has(Checked[i].second));
3612 }
3613#endif
3614
3615 llvm::BasicBlock *Cont = createBasicBlock("cont");
3616 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3617 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3618 // Give hint that we very much don't expect to execute the handler
3619 llvm::MDBuilder MDHelper(getLLVMContext());
3620 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3621 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3622 EmitBlock(Handlers);
3623
3624 // Handler functions take an i8* pointing to the (handler-specific) static
3625 // information block, followed by a sequence of intptr_t arguments
3626 // representing operand values.
3629 if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3630 Args.reserve(DynamicArgs.size() + 1);
3631 ArgTypes.reserve(DynamicArgs.size() + 1);
3632
3633 // Emit handler arguments and create handler function type.
3634 if (!StaticArgs.empty()) {
3635 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3636 auto *InfoPtr = new llvm::GlobalVariable(
3637 CGM.getModule(), Info->getType(), false,
3638 llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3639 llvm::GlobalVariable::NotThreadLocal,
3640 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3641 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3643 Args.push_back(InfoPtr);
3644 ArgTypes.push_back(Args.back()->getType());
3645 }
3646
3647 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3648 Args.push_back(EmitCheckValue(DynamicArgs[i]));
3649 ArgTypes.push_back(IntPtrTy);
3650 }
3651 }
3652
3653 llvm::FunctionType *FnType =
3654 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3655
3656 if (!FatalCond || !RecoverableCond) {
3657 // Simple case: we need to generate a single handler call, either
3658 // fatal, or non-fatal.
3659 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3660 (FatalCond != nullptr), Cont);
3661 } else {
3662 // Emit two handler calls: first one for set of unrecoverable checks,
3663 // another one for recoverable.
// FatalCond is a "passed" predicate: when it holds, skip the fatal handler
// and fall through to the non-fatal handler block.
3664 llvm::BasicBlock *NonFatalHandlerBB =
3665 createBasicBlock("non_fatal." + CheckName);
3666 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3667 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3668 EmitBlock(FatalHandlerBB);
3669 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3670 NonFatalHandlerBB);
3671 EmitBlock(NonFatalHandlerBB);
3672 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3673 Cont);
3674 }
3675
3676 EmitBlock(Cont);
3677}
3678
3680 SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
3681 llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
3682 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3683
3684 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3685 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3686
3687 llvm::MDBuilder MDHelper(getLLVMContext());
3688 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3689 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3690
3691 EmitBlock(CheckBB);
3692
3693 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
3694
3695 llvm::CallInst *CheckCall;
3696 llvm::FunctionCallee SlowPathFn;
3697 if (WithDiag) {
3698 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3699 auto *InfoPtr =
3700 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3701 llvm::GlobalVariable::PrivateLinkage, Info);
3702 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3704
3705 SlowPathFn = CGM.getModule().getOrInsertFunction(
3706 "__cfi_slowpath_diag",
3707 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3708 false));
3709 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3710 } else {
3711 SlowPathFn = CGM.getModule().getOrInsertFunction(
3712 "__cfi_slowpath",
3713 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3714 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3715 }
3716
3718 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3719 CheckCall->setDoesNotThrow();
3720
3721 EmitBlock(Cont);
3722}
3723
3724// Emit a stub for __cfi_check function so that the linker knows about this
3725// symbol in LTO mode.
3727 llvm::Module *M = &CGM.getModule();
3728 ASTContext &C = getContext();
3729 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
3730
3732 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
3733 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
3734 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
3736 FnArgs.push_back(&ArgCallsiteTypeId);
3737 FnArgs.push_back(&ArgAddr);
3738 FnArgs.push_back(&ArgCFICheckFailData);
3739 const CGFunctionInfo &FI =
3741
3742 llvm::Function *F = llvm::Function::Create(
3743 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
3744 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3745 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3747 F->setAlignment(llvm::Align(4096));
3748 CGM.setDSOLocal(F);
3749
3750 llvm::LLVMContext &Ctx = M->getContext();
3751 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3752 // CrossDSOCFI pass is not executed if there is no executable code.
3753 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
3754 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
3755 llvm::ReturnInst::Create(Ctx, nullptr, BB);
3756}
3757
3758// This function is basically a switch over the CFI failure kind, which is
3759// extracted from CFICheckFailData (1st function argument). Each case is either
3760// llvm.trap or a call to one of the two runtime handlers, based on
3761// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
3762// failure kind) traps, but this should really never happen. CFICheckFailData
3763// can be nullptr if the calling module has -fsanitize-trap behavior for this
3764// check kind; in this case __cfi_check_fail traps as well.
3766 SanitizerScope SanScope(this);
3767 FunctionArgList Args;
3772 Args.push_back(&ArgData);
3773 Args.push_back(&ArgAddr);
3774
3775 const CGFunctionInfo &FI =
3777
3778 llvm::Function *F = llvm::Function::Create(
3779 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3780 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3781
3782 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3784 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3785
3786 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3787 SourceLocation());
3788
3789 // This function is not affected by NoSanitizeList. This function does
3790 // not have a source location, but "src:*" would still apply. Revert any
3791 // changes to SanOpts made in StartFunction.
3793
3794 llvm::Value *Data =
3795 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3796 CGM.getContext().VoidPtrTy, ArgData.getLocation());
3797 llvm::Value *Addr =
3798 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3799 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3800
3801 // Data == nullptr means the calling module has trap behaviour for this check.
3802 llvm::Value *DataIsNotNullPtr =
3803 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3804 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3805
3806 llvm::StructType *SourceLocationTy =
3807 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3808 llvm::StructType *CfiCheckFailDataTy =
3809 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3810
3811 llvm::Value *V = Builder.CreateConstGEP2_32(
3812 CfiCheckFailDataTy,
3813 Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
3814 0);
3815
3816 Address CheckKindAddr(V, Int8Ty, getIntAlign());
3817 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3818
3819 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3821 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3822 llvm::Value *ValidVtable = Builder.CreateZExt(
3823 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3824 {Addr, AllVtables}),
3825 IntPtrTy);
3826
3827 const std::pair<int, SanitizerMask> CheckKinds[] = {
3828 {CFITCK_VCall, SanitizerKind::CFIVCall},
3829 {CFITCK_NVCall, SanitizerKind::CFINVCall},
3830 {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
3831 {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
3832 {CFITCK_ICall, SanitizerKind::CFIICall}};
3833
3835 for (auto CheckKindMaskPair : CheckKinds) {
3836 int Kind = CheckKindMaskPair.first;
3837 SanitizerMask Mask = CheckKindMaskPair.second;
3838 llvm::Value *Cond =
3839 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3840 if (CGM.getLangOpts().Sanitize.has(Mask))
3841 EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
3842 {Data, Addr, ValidVtable});
3843 else
3844 EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3845 }
3846
3848 // The only reference to this function will be created during LTO link.
3849 // Make sure it survives until then.
3850 CGM.addUsedGlobal(F);
3851}
3852
3854 if (SanOpts.has(SanitizerKind::Unreachable)) {
3855 SanitizerScope SanScope(this);
3856 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3857 SanitizerKind::Unreachable),
3858 SanitizerHandler::BuiltinUnreachable,
3859 EmitCheckSourceLocation(Loc), std::nullopt);
3860 }
3861 Builder.CreateUnreachable();
3862}
3863
3864void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3865 SanitizerHandler CheckHandlerID) {
3866 llvm::BasicBlock *Cont = createBasicBlock("cont");
3867
3868 // If we're optimizing, collapse all calls to trap down to just one per
3869 // check-type per function to save on code size.
3870 if ((int)TrapBBs.size() <= CheckHandlerID)
3871 TrapBBs.resize(CheckHandlerID + 1);
3872
3873 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3874
3876 CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&
3877 (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {
3878 auto Call = TrapBB->begin();
3879 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3880
3881 Call->applyMergedLocation(Call->getDebugLoc(),
3882 Builder.getCurrentDebugLocation());
3883 Builder.CreateCondBr(Checked, Cont, TrapBB);
3884 } else {
3885 TrapBB = createBasicBlock("trap");
3886 Builder.CreateCondBr(Checked, Cont, TrapBB);
3887 EmitBlock(TrapBB);
3888
3889 llvm::CallInst *TrapCall = Builder.CreateCall(
3890 CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3891 llvm::ConstantInt::get(CGM.Int8Ty,
3893 ? TrapBB->getParent()->size()
3894 : static_cast<uint64_t>(CheckHandlerID)));
3895
3896 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3897 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3899 TrapCall->addFnAttr(A);
3900 }
3901 TrapCall->setDoesNotReturn();
3902 TrapCall->setDoesNotThrow();
3903 Builder.CreateUnreachable();
3904 }
3905
3906 EmitBlock(Cont);
3907}
3908
3909llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3910 llvm::CallInst *TrapCall =
3911 Builder.CreateCall(CGM.getIntrinsic(IntrID));
3912
3913 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3914 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3916 TrapCall->addFnAttr(A);
3917 }
3918
3920 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
3921 return TrapCall;
3922}
3923
3925 LValueBaseInfo *BaseInfo,
3926 TBAAAccessInfo *TBAAInfo) {
3927 assert(E->getType()->isArrayType() &&
3928 "Array to pointer decay must have array source type!");
3929
3930 // Expressions of array type can't be bitfields or vector elements.
3931 LValue LV = EmitLValue(E);
3932 Address Addr = LV.getAddress();
3933
3934 // If the array type was an incomplete type, we need to make sure
3935 // the decay ends up being the right type.
3936 llvm::Type *NewTy = ConvertType(E->getType());
3937 Addr = Addr.withElementType(NewTy);
3938
3939 // Note that VLA pointers are always decayed, so we don't need to do
3940 // anything here.
3941 if (!E->getType()->isVariableArrayType()) {
3942 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3943 "Expected pointer to array");
3944 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3945 }
3946
3947 // The result of this decay conversion points to an array element within the
3948 // base lvalue. However, since TBAA currently does not support representing
3949 // accesses to elements of member arrays, we conservatively represent accesses
3950 // to the pointee object as if it had no any base lvalue specified.
3951 // TODO: Support TBAA for member arrays.
3953 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3954 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3955
3956 return Addr.withElementType(ConvertTypeForMem(EltType));
3957}
3958
3959/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
3960/// array to pointer, return the array subexpression.
3961static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
3962 // If this isn't just an array->pointer decay, bail out.
3963 const auto *CE = dyn_cast<CastExpr>(E);
3964 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
3965 return nullptr;
3966
3967 // If this is a decay from variable width array, bail out.
3968 const Expr *SubExpr = CE->getSubExpr();
3969 if (SubExpr->getType()->isVariableArrayType())
3970 return nullptr;
3971
3972 return SubExpr;
3973}
3974
3976 llvm::Type *elemType,
3977 llvm::Value *ptr,
3978 ArrayRef<llvm::Value*> indices,
3979 bool inbounds,
3980 bool signedIndices,
3981 SourceLocation loc,
3982 const llvm::Twine &name = "arrayidx") {
3983 if (inbounds) {
3984 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
3986 name);
3987 } else {
3988 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
3989 }
3990}
3991
3994 llvm::Type *elementType, bool inbounds,
3995 bool signedIndices, SourceLocation loc,
3996 CharUnits align,
3997 const llvm::Twine &name = "arrayidx") {
3998 if (inbounds) {
3999 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
4001 align, name);
4002 } else {
4003 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
4004 }
4005}
4006
4008 llvm::Value *idx,
4009 CharUnits eltSize) {
4010 // If we have a constant index, we can use the exact offset of the
4011 // element we're accessing.
4012 if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
4013 CharUnits offset = constantIdx->getZExtValue() * eltSize;
4014 return arrayAlign.alignmentAtOffset(offset);
4015
4016 // Otherwise, use the worst-case alignment for any element.
4017 } else {
4018 return arrayAlign.alignmentOfArrayElement(eltSize);
4019 }
4020}
4021
4023 const VariableArrayType *vla) {
4024 QualType eltType;
4025 do {
4026 eltType = vla->getElementType();
4027 } while ((vla = ctx.getAsVariableArrayType(eltType)));
4028 return eltType;
4029}
4030
4032 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4033}
4034
4035static bool hasBPFPreserveStaticOffset(const Expr *E) {
4036 if (!E)
4037 return false;
4038 QualType PointeeType = E->getType()->getPointeeType();
4039 if (PointeeType.isNull())
4040 return false;
4041 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4042 return hasBPFPreserveStaticOffset(BaseDecl);
4043 return false;
4044}
4045
4046// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
4048 Address &Addr) {
4049 if (!CGF.getTarget().getTriple().isBPF())
4050 return Addr;
4051
4052 llvm::Function *Fn =
4053 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4054 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4055 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4056}
4057
4058/// Given an array base, check whether its member access belongs to a record
4059/// with preserve_access_index attribute or not.
4060static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4061 if (!ArrayBase || !CGF.getDebugInfo())
4062 return false;
4063
4064 // Only support base as either a MemberExpr or DeclRefExpr.
4065 // DeclRefExpr to cover cases like:
4066 // struct s { int a; int b[10]; };
4067 // struct s *p;
4068 // p[1].a
4069 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4070 // p->b[5] is a MemberExpr example.
4071 const Expr *E = ArrayBase->IgnoreImpCasts();
4072 if (const auto *ME = dyn_cast<MemberExpr>(E))
4073 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4074
4075 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4076 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4077 if (!VarDef)
4078 return false;
4079
4080 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4081 if (!PtrT)
4082 return false;
4083
4084 const auto *PointeeT = PtrT->getPointeeType()
4086 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4087 return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4088 return false;
4089 }
4090
4091 return false;
4092}
4093
4096 QualType eltType, bool inbounds,
4097 bool signedIndices, SourceLocation loc,
4098 QualType *arrayType = nullptr,
4099 const Expr *Base = nullptr,
4100 const llvm::Twine &name = "arrayidx") {
4101 // All the indices except that last must be zero.
4102#ifndef NDEBUG
4103 for (auto *idx : indices.drop_back())
4104 assert(isa<llvm::ConstantInt>(idx) &&
4105 cast<llvm::ConstantInt>(idx)->isZero());
4106#endif
4107
4108 // Determine the element size of the statically-sized base. This is
4109 // the thing that the indices are expressed in terms of.
4110 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4111 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4112 }
4113
4114 // We can use that to compute the best alignment of the element.
4115 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4116 CharUnits eltAlign =
4117 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4118
4120 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4121
4122 llvm::Value *eltPtr;
4123 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4124 if (!LastIndex ||
4126 addr = emitArraySubscriptGEP(CGF, addr, indices,
4127 CGF.ConvertTypeForMem(eltType), inbounds,
4128 signedIndices, loc, eltAlign, name);
4129 return addr;
4130 } else {
4131 // Remember the original array subscript for bpf target
4132 unsigned idx = LastIndex->getZExtValue();
4133 llvm::DIType *DbgInfo = nullptr;
4134 if (arrayType)
4135 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4136 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4137 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4138 idx, DbgInfo);
4139 }
4140
4141 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4142}
4143
4144/// The offset of a field from the beginning of the record.
4146 const FieldDecl *Field, int64_t &Offset) {
4147 ASTContext &Ctx = CGF.getContext();
4148 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4149 unsigned FieldNo = 0;
4150
4151 for (const FieldDecl *FD : RD->fields()) {
4152 if (FD == Field) {
4153 Offset += Layout.getFieldOffset(FieldNo);
4154 return true;
4155 }
4156
4157 QualType Ty = FD->getType();
4158 if (Ty->isRecordType())
4159 if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
4160 Offset += Layout.getFieldOffset(FieldNo);
4161 return true;
4162 }
4163
4164 if (!RD->isUnion())
4165 ++FieldNo;
4166 }
4167
4168 return false;
4169}
4170
4171/// Returns the relative offset difference between \p FD1 and \p FD2.
4172/// \code
4173/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4174/// \endcode
4175/// Both fields must be within the same struct.
4176static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4177 const FieldDecl *FD1,
4178 const FieldDecl *FD2) {
4179 const RecordDecl *FD1OuterRec =
4181 const RecordDecl *FD2OuterRec =
4183
4184 if (FD1OuterRec != FD2OuterRec)
4185 // Fields must be within the same RecordDecl.
4186 return std::optional<int64_t>();
4187
4188 int64_t FD1Offset = 0;
4189 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4190 return std::optional<int64_t>();
4191
4192 int64_t FD2Offset = 0;
4193 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4194 return std::optional<int64_t>();
4195
4196 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4197}
4198
4200 bool Accessed) {
4201 // The index must always be an integer, which is not an aggregate. Emit it
4202 // in lexical order (this complexity is, sadly, required by C++17).
4203 llvm::Value *IdxPre =
4204 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4205 bool SignedIndices = false;
4206 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4207 auto *Idx = IdxPre;
4208 if (E->getLHS() != E->getIdx()) {
4209 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4210 Idx = EmitScalarExpr(E->getIdx());
4211 }
4212
4213 QualType IdxTy = E->getIdx()->getType();
4214 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4215 SignedIndices |= IdxSigned;
4216
4217 if (SanOpts.has(SanitizerKind::ArrayBounds))
4218 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4219
4220 // Extend or truncate the index type to 32 or 64-bits.
4221 if (Promote && Idx->getType() != IntPtrTy)
4222 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4223
4224 return Idx;
4225 };
4226 IdxPre = nullptr;
4227
4228 // If the base is a vector type, then we are forming a vector element lvalue
4229 // with this subscript.
4230 if (E->getBase()->getType()->isSubscriptableVectorType() &&
4231 !isa<ExtVectorElementExpr>(E->getBase())) {
4232 // Emit the vector as an lvalue to get its address.
4233 LValue LHS = EmitLValue(E->getBase());
4234 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4235 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4236 return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
4237 LHS.getBaseInfo(), TBAAAccessInfo());
4238 }
4239
4240 // All the other cases basically behave like simple offsetting.
4241
4242 // Handle the extvector case we ignored above.
4243 if (isa<ExtVectorElementExpr>(E->getBase())) {
4244 LValue LV = EmitLValue(E->getBase());
4245 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4247
4248 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4249 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4250 SignedIndices, E->getExprLoc());
4251 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4252 CGM.getTBAAInfoForSubobject(LV, EltType));
4253 }
4254
4255 LValueBaseInfo EltBaseInfo;
4256 TBAAAccessInfo EltTBAAInfo;
4257 Address Addr = Address::invalid();
4258 if (const VariableArrayType *vla =
4259 getContext().getAsVariableArrayType(E->getType())) {
4260 // The base must be a pointer, which is not an aggregate. Emit
4261 // it. It needs to be emitted first in case it's what captures
4262 // the VLA bounds.
4263 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4264 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4265
4266 // The element count here is the total number of non-VLA elements.
4267 llvm::Value *numElements = getVLASize(vla).NumElts;
4268
4269 // Effectively, the multiply by the VLA size is part of the GEP.
4270 // GEP indexes are signed, and scaling an index isn't permitted to
4271 // signed-overflow, so we use the same semantics for our explicit
4272 // multiply. We suppress this if overflow is not undefined behavior.
4273 if (getLangOpts().isSignedOverflowDefined()) {
4274 Idx = Builder.CreateMul(Idx, numElements);
4275 } else {
4276 Idx = Builder.CreateNSWMul(Idx, numElements);
4277 }
4278
4279 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4280 !getLangOpts().isSignedOverflowDefined(),
4281 SignedIndices, E->getExprLoc());
4282
4283 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4284 // Indexing over an interface, as in "NSString *P; P[4];"
4285
4286 // Emit the base pointer.
4287 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4288 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4289
4290 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4291 llvm::Value *InterfaceSizeVal =
4292 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4293
4294 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4295
4296 // We don't necessarily build correct LLVM struct types for ObjC
4297 // interfaces, so we can't rely on GEP to do this scaling
4298 // correctly, so we need to cast to i8*. FIXME: is this actually
4299 // true? A lot of other things in the fragile ABI would break...
4300 llvm::Type *OrigBaseElemTy = Addr.getElementType();
4301
4302 // Do the GEP.
4303 CharUnits EltAlign =
4304 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4305 llvm::Value *EltPtr =
4306 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4307 ScaledIdx, false, SignedIndices, E->getExprLoc());
4308 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4309 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4310 // If this is A[i] where A is an array, the frontend will have decayed the
4311 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4312 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4313 // "gep x, i" here. Emit one "gep A, 0, i".
4314 assert(Array->getType()->isArrayType() &&
4315 "Array to pointer decay must have array source type!");
4316 LValue ArrayLV;
4317 // For simple multidimensional array indexing, set the 'accessed' flag for
4318 // better bounds-checking of the base expression.
4319 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4320 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4321 else
4322 ArrayLV = EmitLValue(Array);
4323 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4324
4325 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4326 // If the array being accessed has a "counted_by" attribute, generate
4327 // bounds checking code. The "count" field is at the top level of the
4328 // struct or in an anonymous struct, that's also at the top level. Future
4329 // expansions may allow the "count" to reside at any place in the struct,
4330 // but the value of "counted_by" will be a "simple" path to the count,
4331 // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4332 // similar to emit the correct GEP.
4333 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4334 getLangOpts().getStrictFlexArraysLevel();
4335
4336 if (const auto *ME = dyn_cast<MemberExpr>(Array);
4337 ME &&
4338 ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
4340 const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());
4341 if (const FieldDecl *CountFD = FAMDecl->findCountedByField()) {
4342 if (std::optional<int64_t> Diff =
4343 getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
4344 CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);
4345
4346 // Create a GEP with a byte offset between the FAM and count and
4347 // use that to load the count value.
4349 ArrayLV.getAddress(), Int8PtrTy, Int8Ty);
4350
4351 llvm::Type *CountTy = ConvertType(CountFD->getType());
4352 llvm::Value *Res = Builder.CreateInBoundsGEP(
4353 Int8Ty, Addr.emitRawPointer(*this),
4354 Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
4355 Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4356 ".counted_by.load");
4357
4358 // Now emit the bounds checking.
4359 EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
4360 Array->getType(), Accessed);
4361 }
4362 }
4363 }
4364 }
4365
4366 // Propagate the alignment from the array itself to the result.
4367 QualType arrayType = Array->getType();
4368 Addr = emitArraySubscriptGEP(
4369 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4370 E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
4371 E->getExprLoc(), &arrayType, E->getBase());
4372 EltBaseInfo = ArrayLV.getBaseInfo();
4373 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4374 } else {
4375 // The base must be a pointer; emit it with an estimate of its alignment.
4376 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4377 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4378 QualType ptrType = E->getBase()->getType();
4379 Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
4380 !getLangOpts().isSignedOverflowDefined(),
4381 SignedIndices, E->getExprLoc(), &ptrType,
4382 E->getBase());
4383 }
4384
4385 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4386
4387 if (getLangOpts().ObjC &&
4388 getLangOpts().getGC() != LangOptions::NonGC) {
4391 }
4392 return LV;
4393}
4394
4396 assert(
4397 !E->isIncomplete() &&
4398 "incomplete matrix subscript expressions should be rejected during Sema");
4399 LValue Base = EmitLValue(E->getBase());
4400 llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
4401 llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
4402 llvm::Value *NumRows = Builder.getIntN(
4403 RowIdx->getType()->getScalarSizeInBits(),
4404 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4405 llvm::Value *FinalIdx =
4406 Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4407 return LValue::MakeMatrixElt(
4408 MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
4409 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4410}
4411
4413 LValueBaseInfo &BaseInfo,
4414 TBAAAccessInfo &TBAAInfo,
4415 QualType BaseTy, QualType ElTy,
4416 bool IsLowerBound) {
4417 LValue BaseLVal;
4418 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4419 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4420 if (BaseTy->isArrayType()) {
4421 Address Addr = BaseLVal.getAddress();
4422 BaseInfo = BaseLVal.getBaseInfo();
4423
4424 // If the array type was an incomplete type, we need to make sure
4425 // the decay ends up being the right type.
4426 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4427 Addr = Addr.withElementType(NewTy);
4428
4429 // Note that VLA pointers are always decayed, so we don't need to do
4430 // anything here.
4431 if (!BaseTy->isVariableArrayType()) {
4432 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4433 "Expected pointer to array");
4434 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4435 }
4436
4437 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4438 }
4439 LValueBaseInfo TypeBaseInfo;
4440 TBAAAccessInfo TypeTBAAInfo;
4441 CharUnits Align =
4442 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4443 BaseInfo.mergeForCast(TypeBaseInfo);
4444 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4445 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
4446 CGF.ConvertTypeForMem(ElTy), Align);
4447 }
4448 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4449}
4450
4452 bool IsLowerBound) {
4453
4454 assert(!E->isOpenACCArraySection() &&
4455 "OpenACC Array section codegen not implemented");
4456
4458 QualType ResultExprTy;
4459 if (auto *AT = getContext().getAsArrayType(BaseTy))
4460 ResultExprTy = AT->getElementType();
4461 else
4462 ResultExprTy = BaseTy->getPointeeType();
4463 llvm::Value *Idx = nullptr;
4464 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4465 // Requesting lower bound or upper bound, but without provided length and
4466 // without ':' symbol for the default length -> length = 1.
4467 // Idx = LowerBound ?: 0;
4468 if (auto *LowerBound = E->getLowerBound()) {
4469 Idx = Builder.CreateIntCast(
4470 EmitScalarExpr(LowerBound), IntPtrTy,
4471 LowerBound->getType()->hasSignedIntegerRepresentation());
4472 } else
4473 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4474 } else {
4475 // Try to emit length or lower bound as constant. If this is possible, 1
4476 // is subtracted from constant length or lower bound. Otherwise, emit LLVM
4477 // IR (LB + Len) - 1.
4478 auto &C = CGM.getContext();
4479 auto *Length = E->getLength();
4480 llvm::APSInt ConstLength;
4481 if (Length) {
4482 // Idx = LowerBound + Length - 1;
4483 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4484 ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4485 Length = nullptr;
4486 }
4487 auto *LowerBound = E->getLowerBound();
4488 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4489 if (LowerBound) {
4490 if (std::optional<llvm::APSInt> LB =
4491 LowerBound->getIntegerConstantExpr(C)) {
4492 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4493 LowerBound = nullptr;
4494 }
4495 }
4496 if (!Length)
4497 --ConstLength;
4498 else if (!LowerBound)
4499 --ConstLowerBound;
4500
4501 if (Length || LowerBound) {
4502 auto *LowerBoundVal =
4503 LowerBound
4504 ? Builder.CreateIntCast(
4505 EmitScalarExpr(LowerBound), IntPtrTy,
4506 LowerBound->getType()->hasSignedIntegerRepresentation())
4507 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4508 auto *LengthVal =
4509 Length
4510 ? Builder.CreateIntCast(
4511 EmitScalarExpr(Length), IntPtrTy,
4512 Length->getType()->hasSignedIntegerRepresentation())
4513 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4514 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4515 /*HasNUW=*/false,
4516 !getLangOpts().isSignedOverflowDefined());
4517 if (Length && LowerBound) {
4518 Idx = Builder.CreateSub(
4519 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4520 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4521 }
4522 } else
4523 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4524 } else {
4525 // Idx = ArraySize - 1;
4526 QualType ArrayTy = BaseTy->isPointerType()
4527 ? E->getBase()->IgnoreParenImpCasts()->getType()
4528 : BaseTy;
4529 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4530 Length = VAT->getSizeExpr();
4531 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4532 ConstLength = *L;
4533 Length = nullptr;
4534 }
4535 } else {
4536 auto *CAT = C.getAsConstantArrayType(ArrayTy);
4537 assert(CAT && "unexpected type for array initializer");
4538 ConstLength = CAT->getSize();
4539 }
4540 if (Length) {
4541 auto *LengthVal = Builder.CreateIntCast(
4542 EmitScalarExpr(Length), IntPtrTy,
4543 Length->getType()->hasSignedIntegerRepresentation());
4544 Idx = Builder.CreateSub(
4545 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4546 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4547 } else {
4548 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4549 --ConstLength;
4550 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4551 }
4552 }
4553 }
4554 assert(Idx);
4555
4556 Address EltPtr = Address::invalid();
4557 LValueBaseInfo BaseInfo;
4558 TBAAAccessInfo TBAAInfo;
4559 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4560 // The base must be a pointer, which is not an aggregate. Emit
4561 // it. It needs to be emitted first in case it's what captures
4562 // the VLA bounds.
4563 Address Base =
4564 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4565 BaseTy, VLA->getElementType(), IsLowerBound);
4566 // The element count here is the total number of non-VLA elements.
4567 llvm::Value *NumElements = getVLASize(VLA).NumElts;
4568
4569 // Effectively, the multiply by the VLA size is part of the GEP.
4570 // GEP indexes are signed, and scaling an index isn't permitted to
4571 // signed-overflow, so we use the same semantics for our explicit
4572 // multiply. We suppress this if overflow is not undefined behavior.
4573 if (getLangOpts().isSignedOverflowDefined())
4574 Idx = Builder.CreateMul(Idx, NumElements);
4575 else
4576 Idx = Builder.CreateNSWMul(Idx, NumElements);
4577 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4578 !getLangOpts().isSignedOverflowDefined(),
4579 /*signedIndices=*/false, E->getExprLoc());
4580 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4581 // If this is A[i] where A is an array, the frontend will have decayed the
4582 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4583 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4584 // "gep x, i" here. Emit one "gep A, 0, i".
4585 assert(Array->getType()->isArrayType() &&
4586 "Array to pointer decay must have array source type!");
4587 LValue ArrayLV;
4588 // For simple multidimensional array indexing, set the 'accessed' flag for
4589 // better bounds-checking of the base expression.
4590 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4591 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4592 else
4593 ArrayLV = EmitLValue(Array);
4594
4595 // Propagate the alignment from the array itself to the result.
4596 EltPtr = emitArraySubscriptGEP(
4597 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4598 ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
4599 /*signedIndices=*/false, E->getExprLoc());
4600 BaseInfo = ArrayLV.getBaseInfo();
4601 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4602 } else {
4603 Address Base =
4604 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
4605 ResultExprTy, IsLowerBound);
4606 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4607 !getLangOpts().isSignedOverflowDefined(),
4608 /*signedIndices=*/false, E->getExprLoc());
4609 }
4610
4611 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4612}
4613
// NOTE(review): this rendering dropped the function signature (orig. lines
// 4614-4615, CodeGenFunction::EmitExtVectorElementExpr) and declaration lines
// 4644/4651/4663; code below is kept byte-identical — reconcile against the
// full source before editing.
 4616 // Emit the base vector as an l-value.
 4617 LValue Base;
 4618
 4619 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
 4620 if (E->isArrow()) {
 4621 // If it is a pointer to a vector, emit the address and form an lvalue with
 4622 // it.
 4623 LValueBaseInfo BaseInfo;
 4624 TBAAAccessInfo TBAAInfo;
 4625 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
 4626 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
 4627 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
 4628 Base.getQuals().removeObjCGCAttr();
 4629 } else if (E->getBase()->isGLValue()) {
 4630 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
 4631 // emit the base as an lvalue.
 4632 assert(E->getBase()->getType()->isVectorType());
 4633 Base = EmitLValue(E->getBase());
 4634 } else {
 4635 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
 4636 assert(E->getBase()->getType()->isVectorType() &&
 4637 "Result must be a vector");
 4638 llvm::Value *Vec = EmitScalarExpr(E->getBase());
 4639
 4640 // Store the vector to memory (because LValue wants an address).
 4641 Address VecMem = CreateMemTemp(E->getBase()->getType());
 4642 Builder.CreateStore(Vec, VecMem);
 4643 Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
 4645 }
 4646
 4647 QualType type =
 4648 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
 4649
 4650 // Encode the element access list into a vector of unsigned indices.
 4652 E->getEncodedElementAccess(Indices);
 4653
// Simple (plain-address) base: build a constant index vector describing the
// selected elements and return an ext-vector-element lvalue.
 4654 if (Base.isSimple()) {
 4655 llvm::Constant *CV =
 4656 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
 4657 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
 4658 Base.getBaseInfo(), TBAAAccessInfo());
 4659 }
 4660 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
 4661
// Base is itself an ext-vector-element lvalue (e.g. foo.xy.x): compose the
// new access list through the base's element selection.
 4662 llvm::Constant *BaseElts = Base.getExtVectorElts();
 4664
 4665 for (unsigned i = 0, e = Indices.size(); i != e; ++i)
 4666 CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
 4667 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
 4668 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
 4669 Base.getBaseInfo(), TBAAAccessInfo());
 4670}
4671
// NOTE(review): the function signature and the guard for this early-return
// (orig. lines 4672-4673, CodeGenFunction::EmitMemberExpr) plus lines
// 4701/4707 were dropped by extraction; code kept byte-identical.
 4674 EmitIgnoredExpr(E->getBase());
 4675 return EmitDeclRefLValue(DRE);
 4676 }
 4677
 4678 Expr *BaseExpr = E->getBase();
 4679 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
 4680 LValue BaseLV;
 4681 if (E->isArrow()) {
 4682 LValueBaseInfo BaseInfo;
 4683 TBAAAccessInfo TBAAInfo;
 4684 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
 4685 QualType PtrTy = BaseExpr->getType()->getPointeeType();
// Skip sanitizer checks that are statically known to pass: `this` is
// aligned/non-null, and a DeclRefExpr base cannot be null.
 4686 SanitizerSet SkippedChecks;
 4687 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
 4688 if (IsBaseCXXThis)
 4689 SkippedChecks.set(SanitizerKind::Alignment, true);
 4690 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
 4691 SkippedChecks.set(SanitizerKind::Null, true);
 4692 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
 4693 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
 4694 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
 4695 } else
 4696 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
 4697
 4698 NamedDecl *ND = E->getMemberDecl();
 4699 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
 4700 LValue LV = EmitLValueForField(BaseLV, Field);
 4702 if (getLangOpts().OpenMP) {
 4703 // If the member was explicitly marked as nontemporal, mark it as
 4704 // nontemporal. If the base lvalue is marked as nontemporal, mark access
 4705 // to children as nontemporal too.
 4706 if ((IsWrappedCXXThis(BaseExpr) &&
 4708 BaseLV.isNontemporal())
 4709 LV.setNontemporal(/*Value=*/true);
 4710 }
 4711 return LV;
 4712 }
 4713
 4714 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
 4715 return EmitFunctionDeclLValue(*this, E, FD);
 4716
 4717 llvm_unreachable("Unhandled member declaration!");
 4718}
4719
 4720/// Given that we are currently emitting a lambda, emit an l-value for
 4721/// one of its members.
 4722///
// NOTE(review): the signature's first line (orig. 4723) and lines 4740/4750
// were dropped by extraction; code kept byte-identical.
 4724 llvm::Value *ThisValue) {
// An explicit-object ("deducing this") lambda accesses captures through its
// first parameter instead of an implicit this value.
 4725 bool HasExplicitObjectParameter = false;
 4726 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
 4727 if (MD) {
 4728 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
 4729 assert(MD->getParent()->isLambda());
 4730 assert(MD->getParent() == Field->getParent());
 4731 }
 4732 LValue LambdaLV;
 4733 if (HasExplicitObjectParameter) {
 4734 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
 4735 auto It = LocalDeclMap.find(D);
 4736 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
 4737 Address AddrOfExplicitObject = It->getSecond();
 4738 if (D->getType()->isReferenceType())
 4739 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
 4741 else
 4742 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
 4743 D->getType().getNonReferenceType());
 4744
 4745 // Make sure we have an lvalue to the lambda itself and not a derived class.
 4746 auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
 4747 auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
 4748 if (ThisTy != LambdaTy) {
 4749 const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
 4751 LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
 4752 BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
 4753 LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
 4754 }
 4755 } else {
// Ordinary lambda: form an lvalue for the closure object from ThisValue.
 4756 QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
 4757 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
 4758 }
 4759 return EmitLValueForField(LambdaLV, Field);
 4760}
4761
// NOTE(review): this is the single-argument overload whose signature line
// (orig. 4762) was dropped; it forwards to the two-argument form using the
// current ABI `this` value.
 4763 return EmitLValueForLambdaField(Field, CXXABIThisValue);
 4764}
4765
 4766/// Get the field index in the debug info. The debug info structure/union
 4767/// will ignore the unnamed bitfields.
// NOTE(review): signature line (orig. 4768) dropped by extraction.
 4769 unsigned FieldIndex) {
 4770 unsigned I = 0, Skipped = 0;
 4771
// Count how many unnamed bitfields precede FieldIndex; the debug-info index
// is the AST index minus those skipped fields.
 4772 for (auto *F : Rec->getDefinition()->fields()) {
 4773 if (I == FieldIndex)
 4774 break;
 4775 if (F->isUnnamedBitField())
 4776 Skipped++;
 4777 I++;
 4778 }
 4779
 4780 return FieldIndex - Skipped;
 4781}
4782
 4783/// Get the address of a zero-sized field within a record. The resulting
 4784/// address doesn't necessarily have the right type.
// NOTE(review): signature line (orig. 4785) and the offset computation's
// first line (orig. 4787) were dropped; code kept byte-identical.
 4786 const FieldDecl *Field) {
 4788 CGF.getContext().getFieldOffset(Field));
 4789 if (Offset.isZero())
 4790 return Base;
// Non-zero offset: step forward byte-wise from the base address.
 4791 Base = Base.withElementType(CGF.Int8Ty);
 4792 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
 4793}
4794
 4795/// Drill down to the storage of a field without walking into
 4796/// reference types.
 4797///
 4798/// The resulting address doesn't necessarily have the right type.
// NOTE(review): signature's first line (orig. 4799) dropped by extraction.
 4800 const FieldDecl *field) {
// Fields that occupy no storage are addressed by byte offset instead of a
// struct GEP.
 4801 if (isEmptyFieldForLayout(CGF.getContext(), field))
 4802 return emitAddrOfZeroSizeField(CGF, base, field);
 4803
 4804 const RecordDecl *rec = field->getParent();
 4805
 4806 unsigned idx =
 4807 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
 4808
 4809 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
 4810}
4811
// NOTE(review): signature's first line (orig. 4812) and the GEP call line
// (orig. 4821) were dropped by extraction; code kept byte-identical.
// Emits a field access that preserves the AST field index in debug info
// (BPF CO-RE style access), rather than a plain layout-based GEP.
 4813 Address addr, const FieldDecl *field) {
 4814 const RecordDecl *rec = field->getParent();
 4815 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
 4816 base.getType(), rec->getLocation());
 4817
 4818 unsigned idx =
 4819 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
 4820
 4822 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
 4823}
4824
4825static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4826 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4827 if (!RD)
4828 return false;
4829
4830 if (RD->isDynamicClass())
4831 return true;
4832
4833 for (const auto &Base : RD->bases())
4834 if (hasAnyVptr(Base.getType(), Context))
4835 return true;
4836
4837 for (const FieldDecl *Field : RD->fields())
4838 if (hasAnyVptr(Field->getType(), Context))
4839 return true;
4840
4841 return false;
4842}
4843
// NOTE(review): this is CodeGenFunction::EmitLValueForField; the signature's
// first line (orig. 4844) and numerous interior lines (4850, 4856-57, 4861,
// 4872, 4922, 4931, 4935, 4945, 4957, 4959, 4965, 5007-08) were dropped by
// extraction. Code kept byte-identical; logic is too order-sensitive to
// restyle without the full source.
 4845 const FieldDecl *field) {
 4491 LValueBaseInfo BaseInfo = base.getBaseInfo();
 4847
// Bitfields take a dedicated path: address the storage unit, then return a
// bitfield lvalue carrying the CGBitFieldInfo.
 4848 if (field->isBitField()) {
 4849 const CGRecordLayout &RL =
 4851 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
// AAPCS volatile-bitfield rule: use the volatile storage unit when the
// target/codegen options request it and the field's container qualifies.
 4852 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
 4853 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
 4854 Info.VolatileStorageSize != 0 &&
 4855 field->getType()
 4858 Address Addr = base.getAddress();
 4859 unsigned Idx = RL.getLLVMFieldNo(field);
 4860 const RecordDecl *rec = field->getParent();
 4862 Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
 4863 if (!UseVolatile) {
 4864 if (!IsInPreservedAIRegion &&
 4865 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
 4866 if (Idx != 0)
 4867 // For structs, we GEP to the field that the record layout suggests.
 4868 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
 4869 } else {
 4870 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
 4871 getContext().getRecordType(rec), rec->getLocation());
 4873 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
 4874 DbgInfo);
 4875 }
 4876 }
 4877 const unsigned SS =
 4878 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
 4879 // Get the access type.
 4880 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
 4881 Addr = Addr.withElementType(FieldIntTy);
 4882 if (UseVolatile) {
 4883 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
 4884 if (VolatileOffset)
 4885 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
 4886 }
 4887
 4888 QualType fieldType =
 4889 field->getType().withCVRQualifiers(base.getVRQualifiers());
 4890 // TODO: Support TBAA for bit fields.
 4891 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
 4892 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
 4893 TBAAAccessInfo());
 4894 }
 4895
 4896 // Fields of may-alias structures are may-alias themselves.
 4897 // FIXME: this should get propagated down through anonymous structs
 4898 // and unions.
 4899 QualType FieldType = field->getType();
 4900 const RecordDecl *rec = field->getParent();
 4901 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
 4902 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
 4903 TBAAAccessInfo FieldTBAAInfo;
 4904 if (base.getTBAAInfo().isMayAlias() ||
 4905 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
 4906 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
 4907 } else if (rec->isUnion()) {
 4908 // TODO: Support TBAA for unions.
 4909 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
 4910 } else {
 4911 // If no base type been assigned for the base access, then try to generate
 4912 // one for this base lvalue.
 4913 FieldTBAAInfo = base.getTBAAInfo();
 4914 if (!FieldTBAAInfo.BaseType) {
 4915 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
 4916 assert(!FieldTBAAInfo.Offset &&
 4917 "Nonzero offset for an access with no base type!");
 4918 }
 4919
 4920 // Adjust offset to be relative to the base type.
 4921 const ASTRecordLayout &Layout =
 4923 unsigned CharWidth = getContext().getCharWidth();
 4924 if (FieldTBAAInfo.BaseType)
 4925 FieldTBAAInfo.Offset +=
 4926 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
 4927
 4928 // Update the final access type and size.
 4929 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
 4930 FieldTBAAInfo.Size =
 4932 }
 4933
 4934 Address addr = base.getAddress();
 4936 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
 4937 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
 4938 if (CGM.getCodeGenOpts().StrictVTablePointers &&
 4939 ClassDef->isDynamicClass()) {
 4940 // Getting to any field of dynamic object requires stripping dynamic
 4941 // information provided by invariant.group. This is because accessing
 4942 // fields may leak the real address of dynamic object, which could result
 4943 // in miscompilation when leaked pointer would be compared.
 4944 auto *stripped =
 4946 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
 4947 }
 4948 }
 4949
 4950 unsigned RecordCVR = base.getVRQualifiers();
 4951 if (rec->isUnion()) {
 4952 // For unions, there is no pointer adjustment.
 4953 if (CGM.getCodeGenOpts().StrictVTablePointers &&
 4954 hasAnyVptr(FieldType, getContext()))
 4955 // Because unions can easily skip invariant.barriers, we need to add
 4956 // a barrier every time CXXRecord field with vptr is referenced.
 4958
 4960 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
 4961 // Remember the original union field index
 4962 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
 4963 rec->getLocation());
 4964 addr =
 4966 addr.emitRawPointer(*this),
 4967 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
 4968 addr.getElementType(), addr.getAlignment());
 4969 }
 4970
 4971 if (FieldType->isReferenceType())
 4972 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
 4973 } else {
 4974 if (!IsInPreservedAIRegion &&
 4975 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
 4976 // For structs, we GEP to the field that the record layout suggests.
 4977 addr = emitAddrOfFieldStorage(*this, addr, field);
 4978 else
 4979 // Remember the original struct field index
 4980 addr = emitPreserveStructAccess(*this, base, addr, field);
 4981 }
 4982
 4983 // If this is a reference field, load the reference right now.
 4984 if (FieldType->isReferenceType()) {
 4985 LValue RefLVal =
 4986 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
 4987 if (RecordCVR & Qualifiers::Volatile)
 4988 RefLVal.getQuals().addVolatile();
 4989 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
 4990
 4991 // Qualifiers on the struct don't apply to the referencee.
 4992 RecordCVR = 0;
 4993 FieldType = FieldType->getPointeeType();
 4994 }
 4995
 4996 // Make sure that the address is pointing to the right type. This is critical
 4997 // for both unions and structs.
 4998 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
 4999
 5000 if (field->hasAttr<AnnotateAttr>())
 5001 addr = EmitFieldAnnotations(field, addr);
 5002
 5003 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
 5004 LV.getQuals().addCVRQualifiers(RecordCVR);
 5005
 5006 // __weak attribute on a field is ignored.
 5009
 5010 return LV;
 5011}
5012
// NOTE(review): the name line of this definition (orig. 5014,
// CodeGenFunction::EmitLValueForFieldInitialization) was dropped by
// extraction; code kept byte-identical.
 5013LValue
 5015 const FieldDecl *Field) {
 5016 QualType FieldType = Field->getType();
 5017
// Non-reference fields behave exactly as a normal field access.
 5018 if (!FieldType->isReferenceType())
 5019 return EmitLValueForField(Base, Field);
 5020
// For a reference field being *initialized*, address the reference's own
// storage instead of loading through it.
 5021 Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
 5022
 5023 // Make sure that the address is pointing to the right type.
 5024 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
 5025 V = V.withElementType(llvmType);
 5026
 5027 // TODO: Generate TBAA information that describes this access as a structure
 5028 // member access and not just an access to an object of the field's type. This
 5029 // should be similar to what we do in EmitLValueForField().
 5030 LValueBaseInfo BaseInfo = Base.getBaseInfo();
 5031 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
 5032 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
 5033 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
 5034 CGM.getTBAAInfoForSubobject(Base, FieldType));
 5035}
5036
// NOTE(review): the signature (orig. 5037, EmitCompoundLiteralLValue) and
// lines 5039/5042/5044/5048/5056-57 were dropped by extraction; code kept
// byte-identical.
 5038 if (E->isFileScope()) {
 5040 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
 5041 }
 5043 // make sure to emit the VLA size.
 5045
// Block-scope compound literal: materialize into a local temporary and
// evaluate the initializer into it.
 5046 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
 5047 const Expr *InitExpr = E->getInitializer();
 5049
 5050 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
 5051 /*Init*/ true);
 5052
 5053 // Block-scope compound literals are destroyed at the end of the enclosing
 5054 // scope in C.
 5055 if (!getLangOpts().CPlusPlus)
 5058 E->getType(), getDestroyer(DtorKind),
 5059 DtorKind & EHCleanup);
 5060
 5061 return Result;
 5062}
5063
// NOTE(review): signature line (orig. 5064, EmitInitListLValue) dropped by
// extraction; code kept byte-identical.
 5065 if (!E->isGLValue())
 5066 // Initializing an aggregate temporary in C++11: T{...}.
 5067 return EmitAggExprToLValue(E);
 5068
 5069 // An lvalue initializer list must be initializing a reference.
 5070 assert(E->isTransparent() && "non-transparent glvalue init list");
 5071 return EmitLValue(E->getInit(0));
 5072}
5073
5074/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5075/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5076/// LValue is returned and the current block has been terminated.
5077static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5078 const Expr *Operand) {
5079 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5080 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5081 return std::nullopt;
5082 }
5083
5084 return CGF.EmitLValue(Operand);
5085}
5086
 5087namespace {
 5088// Handle the case where the condition is a constant evaluatable simple integer,
 5089// which means we don't have to separately handle the true/false blocks.
// NOTE(review): the parameter line of this function (orig. 5091) and line
// 5102 were dropped by extraction; code kept byte-identical.
 5090std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
 5092 const Expr *condExpr = E->getCond();
 5093 bool CondExprBool;
 5094 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
 5095 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
 5096 if (!CondExprBool)
 5097 std::swap(Live, Dead);
 5098
// Only fold away the dead arm if it contains no label that could be jumped
// into from outside.
 5099 if (!CGF.ContainsLabel(Dead)) {
 5100 // If the true case is live, we need to track its region.
 5101 if (CondExprBool)
 5103 // If a throw expression we emit it and return an undefined lvalue
 5104 // because it can't be used.
 5105 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
 5106 CGF.EmitCXXThrowExpr(ThrowExpr);
 5107 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
 5108 llvm::Type *Ty = CGF.UnqualPtrTy;
 5109 return CGF.MakeAddrLValue(
 5110 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
 5111 Dead->getType());
 5112 }
 5113 return CGF.EmitLValue(Live);
 5114 }
 5115 }
 5116 return std::nullopt;
 5117}
5118struct ConditionalInfo {
5119 llvm::BasicBlock *lhsBlock, *rhsBlock;
5120 std::optional<LValue> LHS, RHS;
5121};
 5122
 5123// Create and generate the 3 blocks for a conditional operator.
 5124// Leaves the 'current block' in the continuation basic block.
// NOTE(review): a signature line (orig. 5127) and line 5140 were dropped by
// extraction; code kept byte-identical.
 5125template<typename FuncTy>
 5126ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
 5128 const FuncTy &BranchGenFunc) {
 5129 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
 5130 CGF.createBasicBlock("cond.false"), std::nullopt,
 5131 std::nullopt};
 5132 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
 5133
 5134 CodeGenFunction::ConditionalEvaluation eval(CGF);
 5135 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
 5136 CGF.getProfileCount(E));
 5137
 5138 // Any temporaries created here are conditional.
 5139 CGF.EmitBlock(Info.lhsBlock);
 5141 eval.begin(CGF);
 5142 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
 5143 eval.end(CGF);
 5144 Info.lhsBlock = CGF.Builder.GetInsertBlock();
 5145
// Only branch to the continuation if the arm produced a value (a throw arm
// already terminated its block).
 5146 if (Info.LHS)
 5147 CGF.Builder.CreateBr(endBlock);
 5148
 5149 // Any temporaries created here are conditional.
 5150 CGF.EmitBlock(Info.rhsBlock);
 5151 eval.begin(CGF);
 5152 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
 5153 eval.end(CGF);
 5154 Info.rhsBlock = CGF.Builder.GetInsertBlock();
 5155 CGF.EmitBlock(endBlock);
 5156
 5157 return Info;
 5158}
 5159} // namespace
5160
// NOTE(review): the signature (orig. 5161-5162) and the assert's first line
// (orig. 5165) were dropped by extraction; this emits a conditional operator
// whose result is ignored. Code kept byte-identical.
 5163 if (!E->isGLValue()) {
 5164 // ?: here should be an aggregate.
 5166 "Unexpected conditional operator!");
 5167 return (void)EmitAggExprToLValue(E);
 5168 }
 5169
 5170 OpaqueValueMapping binding(*this, E);
 5171 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
 5172 return;
 5173
// Neither arm's value is needed; emit both arms for side effects only.
 5174 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
 5175 CGF.EmitIgnoredExpr(E);
 5176 return LValue{};
 5177 });
 5178}
// NOTE(review): the signature (orig. 5179-5180,
// EmitConditionalOperatorLValue) and lines 5205/5211 were dropped by
// extraction; code kept byte-identical.
 5181 if (!expr->isGLValue()) {
 5182 // ?: here should be an aggregate.
 5183 assert(hasAggregateEvaluationKind(expr->getType()) &&
 5184 "Unexpected conditional operator!");
 5185 return EmitAggExprToLValue(expr);
 5186 }
 5187
 5188 OpaqueValueMapping binding(*this, expr);
// Fast path: constant-foldable condition needs only one arm.
 5189 if (std::optional<LValue> Res =
 5190 HandleConditionalOperatorLValueSimpleCase(*this, expr))
 5191 return *Res;
 5192
 5193 ConditionalInfo Info = EmitConditionalBlocks(
 5194 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
 5195 return EmitLValueOrThrowExpression(CGF, E);
 5196 });
 5197
 5198 if ((Info.LHS && !Info.LHS->isSimple()) ||
 5199 (Info.RHS && !Info.RHS->isSimple()))
 5200 return EmitUnsupportedLValue(expr, "conditional operator");
 5201
// Both arms produced an lvalue: merge the two addresses (phi) in the
// continuation block.
 5202 if (Info.LHS && Info.RHS) {
 5203 Address lhsAddr = Info.LHS->getAddress();
 5204 Address rhsAddr = Info.RHS->getAddress();
 5206 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
 5207 Builder.GetInsertBlock(), expr->getType());
 5208 AlignmentSource alignSource =
 5209 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
 5210 Info.RHS->getBaseInfo().getAlignmentSource());
 5212 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
 5213 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
 5214 TBAAInfo);
 5215 } else {
 5216 assert((Info.LHS || Info.RHS) &&
 5217 "both operands of glvalue conditional are throw-expressions?");
 5218 return Info.LHS ? *Info.LHS : *Info.RHS;
 5219 }
 5220}
5221
 5222/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
 5223/// type. If the cast is to a reference, we can have the usual lvalue result,
 5224/// otherwise if a cast is needed by the code generator in an lvalue context,
 5225/// then it must mean that we need the address of an aggregate in order to
 5226/// access one of its members. This can happen for all the reasons that casts
 5227/// are permitted with aggregate result, including noop aggregate casts, and
 5228/// cast from scalar to union.
// NOTE(review): the signature line (orig. 5229) and several interior lines
// (5295, 5334, 5342, 5353, 5359-60, 5364, 5377, 5381, 5386, 5390, 5395,
// 5401, 5403) were dropped by extraction; code kept byte-identical.
 5230 switch (E->getCastKind()) {
// All value-producing cast kinds: these never yield an lvalue.
 5231 case CK_ToVoid:
 5232 case CK_BitCast:
 5233 case CK_LValueToRValueBitCast:
 5234 case CK_ArrayToPointerDecay:
 5235 case CK_FunctionToPointerDecay:
 5236 case CK_NullToMemberPointer:
 5237 case CK_NullToPointer:
 5238 case CK_IntegralToPointer:
 5239 case CK_PointerToIntegral:
 5240 case CK_PointerToBoolean:
 5241 case CK_IntegralCast:
 5242 case CK_BooleanToSignedIntegral:
 5243 case CK_IntegralToBoolean:
 5244 case CK_IntegralToFloating:
 5245 case CK_FloatingToIntegral:
 5246 case CK_FloatingToBoolean:
 5247 case CK_FloatingCast:
 5248 case CK_FloatingRealToComplex:
 5249 case CK_FloatingComplexToReal:
 5250 case CK_FloatingComplexToBoolean:
 5251 case CK_FloatingComplexCast:
 5252 case CK_FloatingComplexToIntegralComplex:
 5253 case CK_IntegralRealToComplex:
 5254 case CK_IntegralComplexToReal:
 5255 case CK_IntegralComplexToBoolean:
 5256 case CK_IntegralComplexCast:
 5257 case CK_IntegralComplexToFloatingComplex:
 5258 case CK_DerivedToBaseMemberPointer:
 5259 case CK_BaseToDerivedMemberPointer:
 5260 case CK_MemberPointerToBoolean:
 5261 case CK_ReinterpretMemberPointer:
 5262 case CK_AnyPointerToBlockPointerCast:
 5263 case CK_ARCProduceObject:
 5264 case CK_ARCConsumeObject:
 5265 case CK_ARCReclaimReturnedObject:
 5266 case CK_ARCExtendBlockObject:
 5267 case CK_CopyAndAutoreleaseBlockObject:
 5268 case CK_IntToOCLSampler:
 5269 case CK_FloatingToFixedPoint:
 5270 case CK_FixedPointToFloating:
 5271 case CK_FixedPointCast:
 5272 case CK_FixedPointToBoolean:
 5273 case CK_FixedPointToIntegral:
 5274 case CK_IntegralToFixedPoint:
 5275 case CK_MatrixCast:
 5276 case CK_HLSLVectorTruncation:
 5277 case CK_HLSLArrayRValue:
 5278 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
 5279
 5280 case CK_Dependent:
 5281 llvm_unreachable("dependent cast kind in IR gen!");
 5282
 5283 case CK_BuiltinFnToFnPtr:
 5284 llvm_unreachable("builtin functions are handled elsewhere");
 5285
 5286 // These are never l-values; just use the aggregate emission code.
 5287 case CK_NonAtomicToAtomic:
 5288 case CK_AtomicToNonAtomic:
 5289 return EmitAggExprToLValue(E);
 5290
 5291 case CK_Dynamic: {
 5292 LValue LV = EmitLValue(E->getSubExpr());
 5293 Address V = LV.getAddress();
 5294 const auto *DCE = cast<CXXDynamicCastExpr>(E);
 5296 }
 5297
// These casts are transparent for lvalue purposes.
 5298 case CK_ConstructorConversion:
 5299 case CK_UserDefinedConversion:
 5300 case CK_CPointerToObjCPointerCast:
 5301 case CK_BlockPointerToObjCPointerCast:
 5302 case CK_LValueToRValue:
 5303 return EmitLValue(E->getSubExpr());
 5304
 5305 case CK_NoOp: {
 5306 // CK_NoOp can model a qualification conversion, which can remove an array
 5307 // bound and change the IR type.
 5308 // FIXME: Once pointee types are removed from IR, remove this.
 5309 LValue LV = EmitLValue(E->getSubExpr());
 5310 // Propagate the volatile qualifer to LValue, if exist in E.
 5311 if (E->changesVolatileQualification())
 5312 LV.getQuals() = E->getType().getQualifiers();
 5313 if (LV.isSimple()) {
 5314 Address V = LV.getAddress();
 5315 if (V.isValid()) {
 5316 llvm::Type *T = ConvertTypeForMem(E->getType());
 5317 if (V.getElementType() != T)
 5318 LV.setAddress(V.withElementType(T));
 5319 }
 5320 }
 5321 return LV;
 5322 }
 5323
 5324 case CK_UncheckedDerivedToBase:
 5325 case CK_DerivedToBase: {
 5326 const auto *DerivedClassTy =
 5327 E->getSubExpr()->getType()->castAs<RecordType>();
 5328 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
 5329
 5330 LValue LV = EmitLValue(E->getSubExpr());
 5331 Address This = LV.getAddress();
 5332
 5333 // Perform the derived-to-base conversion
 5335 This, DerivedClassDecl, E->path_begin(), E->path_end(),
 5336 /*NullCheckValue=*/false, E->getExprLoc());
 5337
 5338 // TODO: Support accesses to members of base classes in TBAA. For now, we
 5339 // conservatively pretend that the complete object is of the base class
 5340 // type.
 5341 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
 5343 }
 5344 case CK_ToUnion:
 5345 return EmitAggExprToLValue(E);
 5346 case CK_BaseToDerived: {
 5347 const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
 5348 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
 5349
 5350 LValue LV = EmitLValue(E->getSubExpr());
 5351
 5352 // Perform the base-to-derived conversion
 5354 LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
 5355 /*NullCheckValue=*/false);
 5356
 5357 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
 5358 // performed and the object is not of the derived type.
 5361 E->getType());
 5362
 5363 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
 5365 /*MayBeNull=*/false, CFITCK_DerivedCast,
 5366 E->getBeginLoc());
 5367
 5368 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
 5370 }
 5371 case CK_LValueBitCast: {
 5372 // This must be a reinterpret_cast (or c-style equivalent).
 5373 const auto *CE = cast<ExplicitCastExpr>(E);
 5374
 5375 CGM.EmitExplicitCastExprType(CE, this);
 5376 LValue LV = EmitLValue(E->getSubExpr());
 5378 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
 5379
 5380 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
 5382 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
 5383 E->getBeginLoc());
 5384
 5385 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
 5387 }
 5388 case CK_AddressSpaceConversion: {
 5389 LValue LV = EmitLValue(E->getSubExpr());
 5391 llvm::Value *V = getTargetHooks().performAddrSpaceCast(
 5392 *this, LV.getPointer(*this),
 5393 E->getSubExpr()->getType().getAddressSpace(),
 5394 E->getType().getAddressSpace(), ConvertType(DestTy));
 5396 LV.getAddress().getAlignment()),
 5397 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
 5398 }
 5399 case CK_ObjCObjectLValueCast: {
 5400 LValue LV = EmitLValue(E->getSubExpr());
 5402 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
 5404 }
 5405 case CK_ZeroToOCLOpaqueType:
 5406 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
 5407
 5408 case CK_VectorSplat: {
 5409 // LValue results of vector splats are only supported in HLSL.
 5410 if (!getLangOpts().HLSL)
 5411 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
 5412 return EmitLValue(E->getSubExpr());
 5413 }
 5414 }
 5415
 5416 llvm_unreachable("Unhandled lvalue cast kind?");
 5417}
5418
5422}
5423
// NOTE(review): the name/parameter lines of this definition (orig. 5425-5426,
// getOrCreateOpaqueLValueMapping) were dropped by extraction.
 5424LValue
 5427
// Return the previously-bound lvalue for this opaque value expression, if
// one was recorded.
 5428 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
 5429 it = OpaqueLValues.find(e);
 5430
 5431 if (it != OpaqueLValues.end())
 5432 return it->second;
 5433
// Unique OVEs may be emitted on demand from their source expression.
 5434 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
 5435 return EmitLValue(e->getSourceExpr());
 5436}
5437
// NOTE(review): the name/parameter lines of this definition (orig. 5439-5440,
// getOrCreateOpaqueRValueMapping) were dropped by extraction.
 5438RValue
 5441
// Return the previously-bound rvalue for this opaque value expression, if
// one was recorded.
 5442 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
 5443 it = OpaqueRValues.find(e);
 5444
 5445 if (it != OpaqueRValues.end())
 5446 return it->second;
 5447
// Unique OVEs may be emitted on demand from their source expression.
 5448 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
 5449 return EmitAnyExpr(e->getSourceExpr());
 5450}
5451
// NOTE(review): the signature's first/last lines (orig. 5452 and 5454,
// EmitRValueForField) were dropped by extraction; code kept byte-identical.
 5453 const FieldDecl *FD,
 5455 QualType FT = FD->getType();
 5456 LValue FieldLV = EmitLValueForField(LV, FD);
// Dispatch on the field type's evaluation kind to load it as an RValue.
 5457 switch (getEvaluationKind(FT)) {
 5458 case TEK_Complex:
 5459 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
 5460 case TEK_Aggregate:
 5461 return FieldLV.asAggregateRValue();
 5462 case TEK_Scalar:
 5463 // This routine is used to load fields one-by-one to perform a copy, so
 5464 // don't load reference fields.
 5465 if (FD->getType()->isReferenceType())
 5466 return RValue::get(FieldLV.getPointer(*this));
 5467 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
 5468 // primitive load.
 5469 if (FieldLV.isBitField())
 5470 return EmitLoadOfLValue(FieldLV, Loc);
 5471 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
 5472 }
 5473 llvm_unreachable("bad evaluation kind");
 5474}
5475
5476//===--------------------------------------------------------------------===//
5477// Expression Emission
5478//===--------------------------------------------------------------------===//
5479
// Emit a CallExpr, first dispatching to the specialized emitters for block
// calls, C++ member calls, CUDA kernel calls, and operator calls on
// implicit-object member functions, then resolving the callee and handling
// builtins and pseudo-destructors.
// NOTE(review): the HTML rendering dropped the signature line and the
// `return Emit...` lines of each early dispatch — this listing is not a
// complete transcription; consult upstream CGExpr.cpp.
5481 ReturnValueSlot ReturnValue) {
5482 // Builtins never have block type.
5483 if (E->getCallee()->getType()->isBlockPointerType())
5485
5486 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
5488
5489 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
5491
5492 // A CXXOperatorCallExpr is created even for explicit object methods, but
5493 // these should be treated like static function call.
5494 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
5495 if (const auto *MD =
5496 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
5497 MD && MD->isImplicitObjectMemberFunction())
5499
5500 CGCallee callee = EmitCallee(E->getCallee());
5501
 // Builtins are emitted through the dedicated builtin path.
5502 if (callee.isBuiltin()) {
5503 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
5504 E, ReturnValue);
5505 }
5506
5507 if (callee.isPseudoDestructor()) {
5509 }
5510
5511 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
5512}
5513
5514/// Emit a CallExpr without considering whether it might be a subclass.
// NOTE(review): the rendering dropped the signature line; the continuation
// below shows the (E, ReturnValue) parameter list.
5516 ReturnValueSlot ReturnValue) {
5517 CGCallee Callee = EmitCallee(E->getCallee());
5518 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
5519}
5520
5521// Detect the unusual situation where an inline version is shadowed by a
5522// non-inline version. In that case we should pick the external one
5523// everywhere. That's GCC behavior too.
5525 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
5526 if (!PD->isInlineBuiltinDeclaration())
5527 return false;
5528 return true;
5529}
5530
// Build a CGCallee for a direct call to the function named by GD, handling
// inline builtin declarations (call the ".inline" clone), replaceable
// builtins (emit as a builtin unless suppressed), and host-side CUDA kernel
// references (call the kernel stub).
// NOTE(review): the rendering dropped the signature line (see the index:
// "static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD)")
// and two interior lines (the IsPredefinedLibFunction initializer and the
// second operand of the FDInlineName condition).
5532 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
5533
5534 if (auto builtinID = FD->getBuiltinID()) {
5535 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
5536 std::string NoBuiltins = "no-builtins";
5537
5538 StringRef Ident = CGF.CGM.getMangledName(GD);
5539 std::string FDInlineName = (Ident + ".inline").str();
5540
5541 bool IsPredefinedLibFunction =
5543 bool HasAttributeNoBuiltin =
5544 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
5545 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
5546
5547 // When directly calling an inline builtin, call it through its mangled
5548 // name to make it clear it's not the actual builtin.
5549 if (CGF.CurFn->getName() != FDInlineName &&
5551 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5552 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
5553 llvm::Module *M = Fn->getParent();
 // Lazily create an always_inline internal clone named "<mangled>.inline"
 // so the call clearly targets the inline definition, not the builtin.
5554 llvm::Function *Clone = M->getFunction(FDInlineName);
5555 if (!Clone) {
5556 Clone = llvm::Function::Create(Fn->getFunctionType(),
5557 llvm::GlobalValue::InternalLinkage,
5558 Fn->getAddressSpace(), FDInlineName, M);
5559 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
5560 }
5561 return CGCallee::forDirect(Clone, GD);
5562 }
5563
5564 // Replaceable builtins provide their own implementation of a builtin. If we
5565 // are in an inline builtin implementation, avoid trivial infinite
5566 // recursion. Honor __attribute__((no_builtin("foo"))) or
5567 // __attribute__((no_builtin)) on the current function unless foo is
5568 // not a predefined library function which means we must generate the
5569 // builtin no matter what.
5570 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
5571 return CGCallee::forBuiltin(builtinID, FD);
5572 }
5573
5574 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
 // On the CUDA host side, a reference to a __global__ function must call
 // the kernel stub rather than the device function itself.
5575 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
5576 FD->hasAttr<CUDAGlobalAttr>())
5577 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
5578 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
5579
5580 return CGCallee::forDirect(CalleePtr, GD);
5581}
5582
// Resolve a callee expression to a CGCallee: look through parens,
// function-to-pointer decay, and template substitutions; resolve direct
// calls through DeclRefExpr/MemberExpr; otherwise produce an indirect
// callee from the emitted pointer value.
// NOTE(review): the rendering dropped the signature line plus a few interior
// lines (the pseudo-destructor return, the `QualType functionType;`
// declaration, and the pointerAuth initializer) — not a complete listing.
5584 E = E->IgnoreParens();
5585
5586 // Look through function-to-pointer decay.
5587 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
5588 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
5589 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
5590 return EmitCallee(ICE->getSubExpr());
5591 }
5592
5593 // Resolve direct calls.
5594 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
5595 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
5596 return EmitDirectCallee(*this, FD);
5597 }
5598 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
5599 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
 // The base of a direct member call is evaluated for side effects only.
5600 EmitIgnoredExpr(ME->getBase());
5601 return EmitDirectCallee(*this, FD);
5602 }
5603
5604 // Look through template substitutions.
5605 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
5606 return EmitCallee(NTTP->getReplacement());
5607
5608 // Treat pseudo-destructor calls differently.
5609 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
5611 }
5612
5613 // Otherwise, we have an indirect reference.
5614 llvm::Value *calleePtr;
5616 if (auto ptrType = E->getType()->getAs<PointerType>()) {
5617 calleePtr = EmitScalarExpr(E);
5618 functionType = ptrType->getPointeeType();
5619 } else {
 // A callee of function (not pointer) type: take its address as an lvalue.
5620 functionType = E->getType();
5621 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
5622 }
5623 assert(functionType->isFunctionType());
5624
 // Record the referenced variable (if any) so indirect-call checks can use it.
5625 GlobalDecl GD;
5626 if (const auto *VD =
5627 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
5628 GD = GlobalDecl(VD);
5629
5630 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
5632 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
5633 return callee;
5634}
5635
// Emit a binary operator whose result is used as an l-value: comma,
// pointer-to-member, and (primarily) assignment. For scalar assignment this
// evaluates the RHS first (see the __block note below), emits the store, and
// returns the LHS lvalue; complex and aggregate assignments are delegated.
// NOTE(review): the rendering dropped the signature line and several interior
// lines (ARC lifetime case labels, the bitfield RHS emission, the
// EmitNullabilityCheck/store lines, the OpenMP callback, and the complex
// case's return) — this listing is incomplete; consult upstream CGExpr.cpp.
5637 // Comma expressions just emit their LHS then their RHS as an l-value.
5638 if (E->getOpcode() == BO_Comma) {
5639 EmitIgnoredExpr(E->getLHS());
5641 return EmitLValue(E->getRHS());
5642 }
5643
5644 if (E->getOpcode() == BO_PtrMemD ||
5645 E->getOpcode() == BO_PtrMemI)
5647
5648 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
5649
5650 // Note that in all of these cases, __block variables need the RHS
5651 // evaluated first just in case the variable gets moved by the RHS.
5652
5653 switch (getEvaluationKind(E->getType())) {
5654 case TEK_Scalar: {
 // Under ARC, strong/autoreleasing assignments have dedicated emitters.
5655 switch (E->getLHS()->getType().getObjCLifetime()) {
5657 return EmitARCStoreStrong(E, /*ignored*/ false).first;
5658
5660 return EmitARCStoreAutoreleasing(E).first;
5661
5662 // No reason to do any of these differently.
5666 break;
5667 }
5668
5669 // TODO: Can we de-duplicate this code with the corresponding code in
5670 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
5671 RValue RV;
5672 llvm::Value *Previous = nullptr;
5673 QualType SrcType = E->getRHS()->getType();
5674 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
5675 // we want to extract that value and potentially (if the bitfield sanitizer
5676 // is enabled) use it to check for an implicit conversion.
5677 if (E->getLHS()->refersToBitField()) {
5678 llvm::Value *RHS =
5680 RV = RValue::get(RHS);
5681 } else
5682 RV = EmitAnyExpr(E->getRHS());
5683
5684 LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
5685
5686 if (RV.isScalar())
5688
5689 if (LV.isBitField()) {
5690 llvm::Value *Result = nullptr;
5691 // If bitfield sanitizers are enabled we want to use the result
5692 // to check whether a truncation or sign change has occurred.
5693 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
5695 else
5697
5698 // If the expression contained an implicit conversion, make sure
5699 // to use the value before the scalar conversion.
5700 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
5701 QualType DstType = E->getLHS()->getType();
5702 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
5703 LV.getBitFieldInfo(), E->getExprLoc());
5704 } else
5705 EmitStoreThroughLValue(RV, LV);
5706
5707 if (getLangOpts().OpenMP)
5709 E->getLHS());
5710 return LV;
5711 }
5712
5713 case TEK_Complex:
5715
5716 case TEK_Aggregate:
5717 return EmitAggExprToLValue(E);
5718 }
5719 llvm_unreachable("bad evaluation kind");
5720}
5721
// Emit a call whose result is used as an l-value. Non-scalar results are
// materialized in an aggregate slot; scalar results must be references (the
// assert below), in which case the returned pointer is the lvalue.
// NOTE(review): the rendering dropped the signature and both `return`
// statements of this function — listing is incomplete.
5723 RValue RV = EmitCallExpr(E);
5724
5725 if (!RV.isScalar())
5728
5729 assert(E->getCallReturnType(getContext())->isReferenceType() &&
5730 "Can't have a scalar return unless the return type is a "
5731 "reference type!");
5732
5734}
5735
// Emit a va_arg expression as an l-value by spilling it to a temporary
// aggregate. NOTE(review): signature line dropped by the rendering.
5737 // FIXME: This shouldn't require another copy.
5738 return EmitAggExprToLValue(E);
5739}
5740
5743 && "binding l-value to type which needs a temporary");
5745 EmitCXXConstructExpr(E, Slot);
5747}
5748
5749LValue
5752}
5753
5755 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
5757}
5758
5762}
5763
5764LValue
5766 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
5768 EmitAggExpr(E->getSubExpr(), Slot);
5769 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
5771}
5772
5775
5776 if (!RV.isScalar())
5779
5780 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
5781 "Can't have a scalar return unless the return type is a "
5782 "reference type!");
5783
5785}
5786
5788 Address V =
5789 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
5791}
5792
// Forward ivar-offset computation to the ObjC runtime implementation.
// NOTE(review): the first signature line was dropped by the rendering.
5794 const ObjCIvarDecl *Ivar) {
5795 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
5796}
5797
// Compute an ivar offset and coerce it to the target's ptrdiff_t width.
// NOTE(review): the rendering dropped the signature line between the return
// type and the (Interface, Ivar) continuation below.
5798llvm::Value *
5800 const ObjCIvarDecl *Ivar) {
5801 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
 // The runtime may return a different integer width; zext/trunc to ptrdiff_t.
5802 QualType PointerDiffType = getContext().getPointerDiffType();
5803 return Builder.CreateZExtOrTrunc(OffsetValue,
5804 getTypes().ConvertType(PointerDiffType));
5805}
5806
// Forward ivar lvalue computation to the ObjC runtime implementation.
// NOTE(review): the first signature line was dropped by the rendering; the
// parameter list (ObjectTy, BaseValue, Ivar, CVRQualifiers) is visible below.
5808 llvm::Value *BaseValue,
5809 const ObjCIvarDecl *Ivar,
5810 unsigned CVRQualifiers) {
5811 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
5812 Ivar, CVRQualifiers);
5813}
5814
// Emit an ObjC ivar reference (base->ivar or base.ivar) as an l-value:
// evaluate the base, then delegate to EmitLValueForIvar with the base's
// CVR qualifiers.
// NOTE(review): the rendering dropped the signature line and one line
// between the LV computation and the return (presumably GC classification).
5816 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
5817 llvm::Value *BaseValue = nullptr;
5818 const Expr *BaseExpr = E->getBase();
5819 Qualifiers BaseQuals;
5820 QualType ObjectTy;
5821 if (E->isArrow()) {
 // Arrow access: the base is a pointer; evaluate it as a scalar.
5822 BaseValue = EmitScalarExpr(BaseExpr);
5823 ObjectTy = BaseExpr->getType()->getPointeeType();
5824 BaseQuals = ObjectTy.getQualifiers();
5825 } else {
 // Dot access: evaluate the base as an lvalue and use its address.
5826 LValue BaseLV = EmitLValue(BaseExpr);
5827 BaseValue = BaseLV.getPointer(*this);
5828 ObjectTy = BaseExpr->getType();
5829 BaseQuals = ObjectTy.getQualifiers();
5830 }
5831
5832 LValue LV =
5833 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
5834 BaseQuals.getCVRQualifiers());
5836 return LV;
5837}
5838
5840 // Can only get l-value for message expression returning aggregate type
5844}
5845
// Emit a call through a function pointer: optionally emit the UBSan
// function-signature check and the CFI indirect-call check, evaluate the
// arguments (respecting C++17 evaluation-order rules and static operator
// calls), adjust the callee for K&R/chain calls and HIP kernel handles,
// perform the call, and emit call-site debug info.
// NOTE(review): the HTML rendering dropped a number of interior lines
// (e.g. the prefix-signature lookup, pointer-auth condition, CFI MD
// initializers, Order initialization, and the FnInfo declaration) — this
// listing is NOT a complete transcription of the upstream function.
5846RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
5847 const CallExpr *E, ReturnValueSlot ReturnValue,
5848 llvm::Value *Chain) {
5849 // Get the actual function type. The callee type will always be a pointer to
5850 // function type or a block pointer type.
5851 assert(CalleeType->isFunctionPointerType() &&
5852 "Call must have function pointer type!");
5853
5854 const Decl *TargetDecl =
5855 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
5856
5857 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
5858 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
5859 "trying to emit a call to an immediate function");
5860
5861 CalleeType = getContext().getCanonicalType(CalleeType);
5862
5863 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
5864
5865 CGCallee Callee = OrigCallee;
5866
 // UBSan -fsanitize=function: verify at runtime that the callee was compiled
 // with a matching function type, using the prefix data emitted before the
 // function body (a signature constant plus a type hash).
5867 if (SanOpts.has(SanitizerKind::Function) &&
5868 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
5869 !isa<FunctionNoProtoType>(PointeeType)) {
5870 if (llvm::Constant *PrefixSig =
5872 SanitizerScope SanScope(this);
5873 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
5874
5875 llvm::Type *PrefixSigType = PrefixSig->getType();
5876 llvm::StructType *PrefixStructTy = llvm::StructType::get(
5877 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
5878
5879 llvm::Value *CalleePtr = Callee.getFunctionPointer();
5881 // Use raw pointer since we are using the callee pointer as data here.
5882 Address Addr =
5883 Address(CalleePtr, CalleePtr->getType(),
5885 CalleePtr->getPointerAlignment(CGM.getDataLayout())),
5886 Callee.getPointerAuthInfo(), nullptr);
5887 CalleePtr = Addr.emitRawPointer(*this);
5888 }
5889
5890 // On 32-bit Arm, the low bit of a function pointer indicates whether
5891 // it's using the Arm or Thumb instruction set. The actual first
5892 // instruction lives at the same address either way, so we must clear
5893 // that low bit before using the function address to find the prefix
5894 // structure.
5895 //
5896 // This applies to both Arm and Thumb target triples, because
5897 // either one could be used in an interworking context where it
5898 // might be passed function pointers of both types.
5899 llvm::Value *AlignedCalleePtr;
5900 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
5901 llvm::Value *CalleeAddress =
5902 Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
5903 llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
5904 llvm::Value *AlignedCalleeAddress =
5905 Builder.CreateAnd(CalleeAddress, Mask);
5906 AlignedCalleePtr =
5907 Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
5908 } else {
5909 AlignedCalleePtr = CalleePtr;
5910 }
5911
 // The prefix struct lives immediately before the function entry (GEP
 // index -1); check the signature first, and only on a match compare the
 // type hash and report a mismatch through the sanitizer handler.
5912 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
5913 llvm::Value *CalleeSigPtr =
5914 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
5915 llvm::Value *CalleeSig =
5916 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
5917 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
5918
5919 llvm::BasicBlock *Cont = createBasicBlock("cont");
5920 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
5921 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
5922
5923 EmitBlock(TypeCheck);
5924 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
5925 Int32Ty,
5926 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
5927 getPointerAlign());
5928 llvm::Value *CalleeTypeHashMatch =
5929 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
5930 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
5931 EmitCheckTypeDescriptor(CalleeType)};
5932 EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
5933 SanitizerHandler::FunctionTypeMismatch, StaticData,
5934 {CalleePtr});
5935
5936 Builder.CreateBr(Cont);
5937 EmitBlock(Cont);
5938 }
5939 }
5940
5941 const auto *FnType = cast<FunctionType>(PointeeType);
5942
5943 // If we are checking indirect calls and this call is indirect, check that the
5944 // function pointer is a member of the bit set for the function type.
5945 if (SanOpts.has(SanitizerKind::CFIICall) &&
5946 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
5947 SanitizerScope SanScope(this);
5948 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
5949
5950 llvm::Metadata *MD;
5951 if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
5953 else
5955
5956 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
5957
5958 llvm::Value *CalleePtr = Callee.getFunctionPointer();
5959 llvm::Value *TypeTest = Builder.CreateCall(
5960 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
5961
5962 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
5963 llvm::Constant *StaticData[] = {
5964 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
5967 };
5968 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
5969 EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
5970 CalleePtr, StaticData);
5971 } else {
5972 EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
5973 SanitizerHandler::CFICheckFail, StaticData,
5974 {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
5975 }
5976 }
5977
5978 CallArgList Args;
5979 if (Chain)
5980 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
5981
5982 // C++17 requires that we evaluate arguments to a call using assignment syntax
5983 // right-to-left, and that we evaluate arguments to certain other operators
5984 // left-to-right. Note that we allow this to override the order dictated by
5985 // the calling convention on the MS ABI, which means that parameter
5986 // destruction order is not necessarily reverse construction order.
5987 // FIXME: Revisit this based on C++ committee response to unimplementability.
5989 bool StaticOperator = false;
5990 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
5991 if (OCE->isAssignmentOp())
5993 else {
5994 switch (OCE->getOperator()) {
5995 case OO_LessLess:
5996 case OO_GreaterGreater:
5997 case OO_AmpAmp:
5998 case OO_PipePipe:
5999 case OO_Comma:
6000 case OO_ArrowStar:
6002 break;
6003 default:
6004 break;
6005 }
6006 }
6007
6008 if (const auto *MD =
6009 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
6010 MD && MD->isStatic())
6011 StaticOperator = true;
6012 }
6013
6014 auto Arguments = E->arguments();
6015 if (StaticOperator) {
6016 // If we're calling a static operator, we need to emit the object argument
6017 // and ignore it.
6018 EmitIgnoredExpr(E->getArg(0));
6019 Arguments = drop_begin(Arguments, 1);
6020 }
6021 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
6022 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
6023
6025 Args, FnType, /*ChainCall=*/Chain);
6026
6027 // C99 6.5.2.2p6:
6028 // If the expression that denotes the called function has a type
6029 // that does not include a prototype, [the default argument
6030 // promotions are performed]. If the number of arguments does not
6031 // equal the number of parameters, the behavior is undefined. If
6032 // the function is defined with a type that includes a prototype,
6033 // and either the prototype ends with an ellipsis (, ...) or the
6034 // types of the arguments after promotion are not compatible with
6035 // the types of the parameters, the behavior is undefined. If the
6036 // function is defined with a type that does not include a
6037 // prototype, and the types of the arguments after promotion are
6038 // not compatible with those of the parameters after promotion,
6039 // the behavior is undefined [except in some trivial cases].
6040 // That is, in the general case, we should assume that a call
6041 // through an unprototyped function type works like a *non-variadic*
6042 // call. The way we make this work is to cast to the exact type
6043 // of the promoted arguments.
6044 //
6045 // Chain calls use this same code path to add the invisible chain parameter
6046 // to the function type.
6047 if (isa<FunctionNoProtoType>(FnType) || Chain) {
6048 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
6049 int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
6050 CalleeTy = CalleeTy->getPointerTo(AS);
6051
6052 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6053 CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
6054 Callee.setFunctionPointer(CalleePtr);
6055 }
6056
6057 // HIP function pointer contains kernel handle when it is used in triple
6058 // chevron. The kernel stub needs to be loaded from kernel handle and used
6059 // as callee.
6060 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
6061 isa<CUDAKernelCallExpr>(E) &&
6062 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6063 llvm::Value *Handle = Callee.getFunctionPointer();
6064 auto *Stub = Builder.CreateLoad(
6065 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
6066 Callee.setFunctionPointer(Stub);
6067 }
6068 llvm::CallBase *CallOrInvoke = nullptr;
6069 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
6070 E == MustTailCall, E->getExprLoc());
6071
6072 // Generate function declaration DISuprogram in order to be used
6073 // in debug info about call sites.
6074 if (CGDebugInfo *DI = getDebugInfo()) {
6075 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
6076 FunctionArgList Args;
6077 QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
6078 DI->EmitFuncDeclForCallSite(CallOrInvoke,
6079 DI->getFunctionType(CalleeDecl, ResTy, Args),
6080 CalleeDecl);
6081 }
6082 }
6083
6084 return Call;
6085}
6086
// Emit a pointer-to-data-member binary expression (.* or ->*) as an lvalue:
// evaluate the base (as a pointer for ->*, as an lvalue for .*), evaluate
// the member-pointer offset, and let the C++ ABI compute the member address.
// NOTE(review): the signature line was dropped by the rendering.
6089 Address BaseAddr = Address::invalid();
6090 if (E->getOpcode() == BO_PtrMemI) {
6091 BaseAddr = EmitPointerWithAlignment(E->getLHS());
6092 } else {
6093 BaseAddr = EmitLValue(E->getLHS()).getAddress();
6094 }
6095
 // The RHS is the member pointer itself, a scalar offset value.
6096 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
6097 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
6098
6099 LValueBaseInfo BaseInfo;
6100 TBAAAccessInfo TBAAInfo;
6101 Address MemberAddr =
6102 EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
6103 &TBAAInfo);
6104
6105 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6106}
6107
6108/// Given the address of a temporary variable, produce an r-value of
6109/// its type.
// NOTE(review): the rendering dropped the signature line and the line that
// builds `lvalue` from the address — listing is incomplete.
6111 QualType type,
6112 SourceLocation loc) {
6114 switch (getEvaluationKind(type)) {
6115 case TEK_Complex:
6116 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
6117 case TEK_Aggregate:
 // Aggregates are returned by address, no load is performed.
6118 return lvalue.asAggregateRValue();
6119 case TEK_Scalar:
6120 return RValue::get(EmitLoadOfScalar(lvalue, loc));
6121 }
6122 llvm_unreachable("bad evaluation kind");
6123}
6124
6125void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6126 assert(Val->getType()->isFPOrFPVectorTy());
6127 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6128 return;
6129
6130 llvm::MDBuilder MDHelper(getLLVMContext());
6131 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
6132
6133 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6134}
6135
6136void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6137 llvm::Type *EltTy = Val->getType()->getScalarType();
6138 if (!EltTy->isFloatTy())
6139 return;
6140
6141 if ((getLangOpts().OpenCL &&
6142 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6143 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6144 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6145 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 3ulp
6146 //
6147 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6148 // build option allows an application to specify that single precision
6149 // floating-point divide (x/y and 1/x) and sqrt used in the program
6150 // source are correctly rounded.
6151 //
6152 // TODO: CUDA has a prec-sqrt flag
6153 SetFPAccuracy(Val, 3.0f);
6154 }
6155}
6156
6157void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6158 llvm::Type *EltTy = Val->getType()->getScalarType();
6159 if (!EltTy->isFloatTy())
6160 return;
6161
6162 if ((getLangOpts().OpenCL &&
6163 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6164 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6165 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6166 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6167 //
6168 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6169 // build option allows an application to specify that single precision
6170 // floating-point divide (x/y and 1/x) and sqrt used in the program
6171 // source are correctly rounded.
6172 //
6173 // TODO: CUDA has a prec-div flag
6174 SetFPAccuracy(Val, 2.5f);
6175 }
6176}
6177
// Result carrier for emitPseudoObjectExpr below: exactly one of LV/RV is
// meaningful, selected by the caller's forLValue flag.
6178namespace {
6179 struct LValueOrRValue {
6180 LValue LV;
6181 RValue RV;
6182 };
6183}
6184
// Emit a PseudoObjectExpr by walking its semantic-form expressions: bind
// each non-unique OpaqueValueExpr to its source expression's value, evaluate
// the result expression (as an lvalue or rvalue per forLValue, possibly
// directly into `slot`), evaluate all other semantics for side effects, and
// finally unbind every opaque value.
// NOTE(review): the rendering dropped a few interior lines (the `opaques`
// SmallVector declaration, the iterator-based for header's first line, and
// parts of the aggregate-slot condition) — listing is incomplete.
6185static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
6186 const PseudoObjectExpr *E,
6187 bool forLValue,
6188 AggValueSlot slot) {
6190
6191 // Find the result expression, if any.
6192 const Expr *resultExpr = E->getResultExpr();
6193 LValueOrRValue result;
6194
6196 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
6197 const Expr *semantic = *i;
6198
6199 // If this semantic expression is an opaque value, bind it
6200 // to the result of its source expression.
6201 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
6202 // Skip unique OVEs.
6203 if (ov->isUnique()) {
6204 assert(ov != resultExpr &&
6205 "A unique OVE cannot be used as the result expression");
6206 continue;
6207 }
6208
6209 // If this is the result expression, we may need to evaluate
6210 // directly into the slot.
6211 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
6212 OVMA opaqueData;
6213 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
6215 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
6216 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
6218 opaqueData = OVMA::bind(CGF, ov, LV);
6219 result.RV = slot.asRValue();
6220
6221 // Otherwise, emit as normal.
6222 } else {
6223 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
6224
6225 // If this is the result, also evaluate the result now.
6226 if (ov == resultExpr) {
6227 if (forLValue)
6228 result.LV = CGF.EmitLValue(ov);
6229 else
6230 result.RV = CGF.EmitAnyExpr(ov, slot);
6231 }
6232 }
6233
 // Remember the binding so it can be unbound after the walk.
6234 opaques.push_back(opaqueData);
6235
6236 // Otherwise, if the expression is the result, evaluate it
6237 // and remember the result.
6238 } else if (semantic == resultExpr) {
6239 if (forLValue)
6240 result.LV = CGF.EmitLValue(semantic);
6241 else
6242 result.RV = CGF.EmitAnyExpr(semantic, slot);
6243
6244 // Otherwise, evaluate the expression in an ignored context.
6245 } else {
6246 CGF.EmitIgnoredExpr(semantic);
6247 }
6248 }
6249
6250 // Unbind all the opaques now.
6251 for (unsigned i = 0, e = opaques.size(); i != e; ++i)
6252 opaques[i].unbind(CGF);
6253
6254 return result;
6255}
6256
6258 AggValueSlot slot) {
6259 return emitPseudoObjectExpr(*this, E, false, slot).RV;
6260}
6261
6263 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
6264}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3341
DynTypedNode Node
Defines enum values for all the target-independent builtin functions.
CodeGenFunction::ComplexPairTy ComplexPairTy
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV, bool IsMemberAccess=false)
Definition: CGExpr.cpp:2669
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM)
Named Registers are named metadata pointing to the register name which will be read from/written to a...
Definition: CGExpr.cpp:2912
static llvm::Value * emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc, llvm::Value *Ptr)
Definition: CGExpr.cpp:672
static const Expr * isSimpleArrayDecayOperand(const Expr *E)
isSimpleArrayDecayOperand - If the specified expr is a simple decay from an array to pointer,...
Definition: CGExpr.cpp:3961
static llvm::cl::opt< bool > ClSanitizeGuardChecks("ubsan-guard-checks", llvm::cl::Optional, llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."))
static bool hasBooleanRepresentation(QualType Ty)
Definition: CGExpr.cpp:1851
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, int64_t &Offset)
The offset of a field from the beginning of the record.
Definition: CGExpr.cpp:4145
static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind)
Definition: CGExpr.cpp:3486
static bool hasBPFPreserveStaticOffset(const RecordDecl *D)
Definition: CGExpr.cpp:4031
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field)
Drill down to the storage of a field without walking into reference types.
Definition: CGExpr.cpp:4799
ConstantEmissionKind
Can we constant-emit a load of a reference to a variable of the given type? This is different from pr...
Definition: CGExpr.cpp:1705
@ CEK_AsReferenceOnly
Definition: CGExpr.cpp:1707
@ CEK_AsValueOnly
Definition: CGExpr.cpp:1709
@ CEK_None
Definition: CGExpr.cpp:1706
@ CEK_AsValueOrReference
Definition: CGExpr.cpp:1708
static bool isConstantEmittableObjectType(QualType type)
Given an object of the given canonical type, can we safely copy a value out of it based on its initia...
Definition: CGExpr.cpp:1680
static QualType getFixedSizeElementType(const ASTContext &ctx, const VariableArrayType *vla)
Definition: CGExpr.cpp:4022
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue)
Definition: CGExpr.cpp:2900
static std::optional< LValue > EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand)
Emit the operand of a glvalue conditional operator.
Definition: CGExpr.cpp:5077
static llvm::Value * emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Type *elemType, llvm::Value *ptr, ArrayRef< llvm::Value * > indices, bool inbounds, bool signedIndices, SourceLocation loc, const llvm::Twine &name="arrayidx")
Definition: CGExpr.cpp:3975
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, GlobalDecl GD)
Definition: CGExpr.cpp:2891
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, CodeGenFunction &CGF, bool IsVector=true)
Definition: CGExpr.cpp:2070
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, const PseudoObjectExpr *E, bool forLValue, AggValueSlot slot)
Definition: CGExpr.cpp:6185
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, Address &Addr)
Definition: CGExpr.cpp:4047
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, const MemberExpr *ME)
Definition: CGExpr.cpp:1815
static llvm::Value * getArrayIndexingBound(CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel)
If Base is known to point to the start of an array, return the length of that array.
Definition: CGExpr.cpp:942
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2167
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type)
Definition: CGExpr.cpp:1711
static QualType getConstantExprReferredType(const FullExpr *E, const ASTContext &Ctx)
Definition: CGExpr.cpp:1514
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool IsBool)
Definition: CGExpr.cpp:1864
static std::optional< int64_t > getOffsetDifferenceInBits(CodeGenFunction &CGF, const FieldDecl *FD1, const FieldDecl *FD2)
Returns the relative offset difference between FD1 and FD2.
Definition: CGExpr.cpp:4176
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD)
Definition: CGExpr.cpp:5531
static LValue EmitThreadPrivateVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, llvm::Type *RealVarTy, SourceLocation Loc)
Definition: CGExpr.cpp:2766
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, RecIndicesTy &Indices)
Definition: CGExpr.cpp:1090
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD)
Definition: CGExpr.cpp:5524
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, const Expr *E, const VarDecl *VD)
Definition: CGExpr.cpp:2840
static bool hasAnyVptr(const QualType Type, const ASTContext &Context)
Definition: CGExpr.cpp:4825
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase)
Given an array base, check whether its member access belongs to a record with preserve_access_index a...
Definition: CGExpr.cpp:4060
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T)
Definition: CGExpr.cpp:2780
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, const DeclRefExpr *E, const VarDecl *VD)
Determine whether we can emit a reference to VD from the current context, despite not necessarily hav...
Definition: CGExpr.cpp:2937
VariableTypeDescriptorKind
Definition: CGExpr.cpp:74
@ TK_Float
A floating-point type.
Definition: CGExpr.cpp:78
@ TK_Unknown
Any other type. The value representation is unspecified.
Definition: CGExpr.cpp:82
@ TK_Integer
An integer type.
Definition: CGExpr.cpp:76
@ TK_BitInt
A _BitInt(N) type.
Definition: CGExpr.cpp:80
static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx, CharUnits eltSize)
Definition: CGExpr.cpp:4007
static llvm::cl::opt< bool > ClSanitizeDebugDeoptimization("ubsan-unique-traps", llvm::cl::Optional, llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."))
static RawAddress createReferenceTemporary(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *Inner, RawAddress *Alloca=nullptr)
Definition: CGExpr.cpp:419
static bool isAAPCS(const TargetInfo &TargetInfo)
Helper method to check if the underlying ABI is AAPCS.
Definition: CGExpr.cpp:466
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, bool isInit, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2095
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition: CGExpr.cpp:1258
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, Address addr, const FieldDecl *field)
Definition: CGExpr.cpp:4812
const SanitizerHandlerInfo SanitizerHandlers[]
Definition: CGExpr.cpp:3503
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, const FieldDecl *Field)
Get the address of a zero-sized field within a record.
Definition: CGExpr.cpp:4785
static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef< llvm::Value * > FnArgs, SanitizerHandler CheckHandler, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB)
Definition: CGExpr.cpp:3509
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, LValueBaseInfo &BaseInfo, TBAAAccessInfo &TBAAInfo, QualType BaseTy, QualType ElTy, bool IsLowerBound)
Definition: CGExpr.cpp:4412
static void pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *E, Address ReferenceTemporary)
Definition: CGExpr.cpp:303
const Decl * D
Expr * E
StringRef Filename
Definition: Format.cpp:3001
static const SanitizerMask AlwaysRecoverable
static const SanitizerMask Unrecoverable
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
SourceLocation Loc
Definition: SemaObjC.cpp:759
Defines the SourceManager interface.
static QualType getPointeeType(const MemRegion *R)
StateNode * Previous
const LValueBase getLValueBase() const
Definition: APValue.cpp:974
bool isLValue() const
Definition: APValue.h:406
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:187
SourceManager & getSourceManager()
Definition: ASTContext.h:721
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2628
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType VoidPtrTy
Definition: ASTContext.h:1146
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:662
const LangOptions & getLangOpts() const
Definition: ASTContext.h:797
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType BoolTy
Definition: ASTContext.h:1120
const NoSanitizeList & getNoSanitizeList() const
Definition: ASTContext.h:807
llvm::DenseMap< const CXXMethodDecl *, CXXCastPath > LambdaCastPaths
For capturing lambdas with an explicit object parameter whose type is derived from the lambda type,...
Definition: ASTContext.h:1209
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2394
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
Definition: ASTContext.h:1119
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2828
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
uint64_t getCharWidth() const
Return the size of the character type, in bits.
Definition: ASTContext.h:2398
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4175
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition: Expr.h:6926
static QualType getBaseOriginalType(const Expr *Base)
Return original type of the base expression for array section.
Definition: Expr.cpp:5141
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2674
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3566
QualType getElementType() const
Definition: Type.h:3578
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3860
A fixed int type of a specified bitwidth.
Definition: Type.h:7633
bool isPredefinedLibFunction(unsigned ID) const
Determines whether this builtin is a predefined libc/libm function, such as "malloc",...
Definition: Builtins.h:160
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1491
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1546
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2803
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition: DeclCXX.h:1371
bool isDynamicClass() const
Definition: DeclCXX.h:586
bool hasDefinition() const
Definition: DeclCXX.h:572
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1066
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2830
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3498
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition: CharUnits.h:207
llvm::MaybeAlign getAsMaybeAlign() const
getAsMaybeAlign - Returns Quantity as a valid llvm::Align or std::nullopt, Beware llvm::MaybeAlign as...
Definition: CharUnits.h:194
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
PointerAuthOptions PointerAuth
Configuration for pointer-signing.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
SanitizerSet SanitizeRecover
Set of sanitizer checks that are non-fatal (i.e.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
llvm::Value * getBasePointer() const
Definition: Address.h:193
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
CharUnits getAlignment() const
Definition: Address.h:189
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition: Address.h:259
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:274
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
Definition: Address.h:231
Address setKnownNonNull()
Definition: Address.h:236
void setAlignment(CharUnits Value)
Definition: Address.h:191
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:181
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:199
An aggregate value slot.
Definition: CGValue.h:504
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:572
Address getAddress() const
Definition: CGValue.h:644
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:613
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
RValue asRValue() const
Definition: CGValue.h:666
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:855
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:135
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:304
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:291
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:202
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition: CGBuilder.h:330
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition: CGBuilder.h:240
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:218
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:107
Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:411
Address CreateLaunderInvariantGroup(Address Addr)
Definition: CGBuilder.h:435
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:127
Address CreatePreserveUnionAccessIndex(Address Addr, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:427
Address CreateStripInvariantGroup(Address Addr)
Definition: CGBuilder.h:441
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:188
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
Definition: CGBuilder.h:260
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:344
virtual llvm::Function * getKernelStub(llvm::GlobalValue *Handle)=0
Get kernel stub by kernel handle.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:97
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:113
Abstract information about a function or function prototype.
Definition: CGCall.h:41
const GlobalDecl getCalleeDecl() const
Definition: CGCall.h:59
All available information about a concrete callee.
Definition: CGCall.h:63
CGCalleeInfo getAbstractInfo() const
Definition: CGCall.h:180
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition: CGCall.h:172
bool isPseudoDestructor() const
Definition: CGCall.h:169
static CGCallee forBuiltin(unsigned builtinID, const FunctionDecl *builtinDecl)
Definition: CGCall.h:123
unsigned getBuiltinID() const
Definition: CGCall.h:164
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:137
bool isBuiltin() const
Definition: CGCall.h:157
const FunctionDecl * getBuiltinDecl() const
Definition: CGCall.h:160
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E)
Definition: CGCall.h:131
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
llvm::DIType * getOrCreateStandaloneType(QualType Ty, SourceLocation Loc)
Emit standalone debug info for a type.
llvm::DIType * getOrCreateRecordType(QualType Ty, SourceLocation L)
Emit record type's standalone debug info.
CGFunctionInfo - Class to encapsulate the information about a function definition.
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, llvm::Value *ivarOffset)=0
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)=0
virtual llvm::Value * EmitIvarOffset(CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)=0
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, Address AddrWeakObj)=0
virtual Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel)=0
Get the address of a selector for the specified name and type values.
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, bool threadlocal=false)=0
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
bool isNontemporalDecl(const ValueDecl *VD) const
Checks if the VD variable is marked as nontemporal declaration in current context.
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
bool containsFieldDecl(const FieldDecl *FD) const
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
void add(RValue rvalue, QualType type)
Definition: CGCall.h:298
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
RValue EmitLoadOfGlobalRegLValue(LValue LV)
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitCastLValue(const CastExpr *E)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue EmitCoawaitLValue(const CoawaitExpr *E)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue)
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
llvm::Value * EmitARCLoadWeakRetained(Address addr)
const LangOptions & getLangOpts() const
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
LValue EmitInitListLValue(const InitListExpr *E)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Address EmitExtVectorElementLValue(LValue V)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Upcast
Checking the operand of a cast to a base object.
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
LValue EmitCallExprLValue(const CallExpr *E)
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ Default
! No language constraints on evaluation order.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
llvm::Value * EmitARCLoadWeak(Address addr)
const TargetInfo & getTarget() const
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
LValue EmitVAArgExprLValue(const VAArgExpr *E)
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
LValue EmitCoyieldLValue(const CoyieldExpr *E)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, llvm::Value *Index, QualType IndexType, QualType IndexedType, bool Accessed)
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitLoadOfExtVectorElementLValue(LValue V)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
LValue EmitDeclRefLValue(const DeclRefExpr *E)
const TargetCodeGenInfo & getTargetHooks() const
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
LValue EmitPredefinedLValue(const PredefinedExpr *E)
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside preserved access index region.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
CGCallee EmitCallee(const Expr *E)
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
LValue EmitMemberExpr(const MemberExpr *E)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
CleanupKind getCleanupKind(QualType::DestructionKind kind)
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue)
Address GetAddrOfBlockDecl(const VarDecl *var)
CodeGenTypes & getTypes() const
void EmitARCInitWeak(Address addr, llvm::Value *value)
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
LValue EmitStringLiteralLValue(const StringLiteral *E)
static Destroyer destroyARCStrongPrecise
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond is false.
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
static Destroyer destroyARCStrongImprecise
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot())
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue)
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
LValue EmitStmtExprLValue(const StmtExpr *E)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not point to a complete object, construct an l-value with the natural ...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
This class organizes the cross-function state that is used while generating LLVM code.
ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD)
Get the address of a GUID.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition: CGExpr.cpp:1244
void setDSOLocal(llvm::GlobalValue *GV) const
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
CGDebugInfo * getModuleDebugInfo()
ConstantAddress GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E)
Returns a pointer to a constant global variable for the given file-scope compound literal expression.
llvm::ConstantInt * CreateCrossDsoCfiTypeId(llvm::Metadata *MD)
Generate a cross-DSO type identifier for MD.
void setTypeDescriptorInMap(QualType Ty, llvm::Constant *C)
llvm::Constant * getRawFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return a function pointer for a reference to the given function.
Definition: CGExpr.cpp:2879
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition: CGCXX.cpp:220
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
Address createUnnamedGlobalFrom(const VarDecl &D, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1106
llvm::Constant * getFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return the ABI-correct function pointer value for a reference to the given function.
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const TargetInfo & getTarget() const
llvm::Metadata * CreateMetadataIdentifierForType(QualType T)
Create a metadata identifier for the given type.
llvm::Constant * getTypeDescriptorFromMap(QualType Ty)
void addUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.used metadata.
llvm::MDNode * getTBAABaseTypeInfo(QualType QTy)
getTBAABaseTypeInfo - Get metadata that describes the given base access type.
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGPointerAuthInfo getFunctionPointerAuthInfo(QualType T)
Return the abstract pointer authentication schema for a pointer to the given function type.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
llvm::Metadata * CreateMetadataIdentifierGeneralized(QualType T)
Create a metadata identifier for the generalization of the given type.
const llvm::Triple & getTriple() const
llvm::Constant * getOrCreateStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
Definition: CGDecl.cpp:246
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAInfoForSubobject(LValue Base, QualType AccessType)
getTBAAInfoForSubobject - Get TBAA information for an access with a given base lvalue.
llvm::Constant * CreateRuntimeVariable(llvm::Type *Ty, StringRef Name)
Create a new runtime global variable with the specified type and name.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ConstantAddress GetAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name=".str")
Return a pointer to a constant array for the given string literal.
ASTContext & getContext() const
ConstantAddress GetAddrOfTemplateParamObject(const TemplateParamObjectDecl *TPO)
Get the address of a template parameter object.
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
llvm::MDNode * getTBAATypeInfo(QualType QTy)
getTBAATypeInfo - Get metadata used to describe accesses to objects of the given type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
TBAAAccessInfo mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, TBAAAccessInfo InfoB)
mergeTBAAInfoForConditionalOperator - Get merged TBAA information for the purposes of conditional ope...
llvm::LLVMContext & getLLVMContext()
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
void SetLLVMFunctionAttributes(GlobalDecl GD, const CGFunctionInfo &Info, llvm::Function *F, bool IsThunk)
Set the LLVM function attributes (sext, zext, etc).
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F)
Set the LLVM function attributes which only apply to a function definition.
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
ConstantAddress GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *)
Return a pointer to a constant array for the given ObjCEncodeExpr node.
ConstantAddress GetAddrOfConstantCString(const std::string &Str, const char *GlobalName=nullptr)
Returns a pointer to a character array containing the literal and a terminating '\0' character.
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:76
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
Definition: CGCall.cpp:1606
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:679
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
const llvm::DataLayout & getDataLayout() const
Definition: CodeGenTypes.h:99
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
Definition: CGCall.cpp:638
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:294
ConstantAddress withElementType(llvm::Type *ElemTy) const
Definition: Address.h:310
llvm::Constant * getPointer() const
Definition: Address.h:306
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
llvm::Constant * tryEmitConstantExpr(const ConstantExpr *CE)
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:368
void mergeForCast(const LValueBaseInfo &Info)
Definition: CGValue.h:174
AlignmentSource getAlignmentSource() const
Definition: CGValue.h:171
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
bool isBitField() const
Definition: CGValue.h:280
bool isMatrixElt() const
Definition: CGValue.h:283
Expr * getBaseIvarExp() const
Definition: CGValue.h:332
llvm::Constant * getExtVectorElts() const
Definition: CGValue.h:409
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition: CGValue.h:478
void setObjCIvar(bool Value)
Definition: CGValue.h:298
bool isObjCArray() const
Definition: CGValue.h:300
bool isObjCStrong() const
Definition: CGValue.h:324
bool isGlobalObjCRef() const
Definition: CGValue.h:306
bool isVectorElt() const
Definition: CGValue.h:279
void setObjCArray(bool Value)
Definition: CGValue.h:301
bool isSimple() const
Definition: CGValue.h:278
bool isVolatileQualified() const
Definition: CGValue.h:285
RValue asAggregateRValue() const
Definition: CGValue.h:498
llvm::Value * getPointer(CodeGenFunction &CGF) const
llvm::Value * getMatrixIdx() const
Definition: CGValue.h:395
llvm::Value * getGlobalReg() const
Definition: CGValue.h:430
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:432
bool isVolatile() const
Definition: CGValue.h:328
const Qualifiers & getQuals() const
Definition: CGValue.h:338
bool isGlobalReg() const
Definition: CGValue.h:282
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:452
bool isObjCWeak() const
Definition: CGValue.h:321
Address getAddress() const
Definition: CGValue.h:361
unsigned getVRQualifiers() const
Definition: CGValue.h:287
void setThreadLocalRef(bool Value)
Definition: CGValue.h:310
LValue setKnownNonNull()
Definition: CGValue.h:350
bool isNonGC() const
Definition: CGValue.h:303
void setGlobalObjCRef(bool Value)
Definition: CGValue.h:307
bool isExtVectorElt() const
Definition: CGValue.h:281
llvm::Value * getVectorIdx() const
Definition: CGValue.h:382
void setNontemporal(bool Value)
Definition: CGValue.h:319
LValueBaseInfo getBaseInfo() const
Definition: CGValue.h:346
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition: CGValue.h:315
QualType getType() const
Definition: CGValue.h:291
const CGBitFieldInfo & getBitFieldInfo() const
Definition: CGValue.h:424
bool isThreadLocalRef() const
Definition: CGValue.h:309
KnownNonNull_t isKnownNonNull() const
Definition: CGValue.h:349
TBAAAccessInfo getTBAAInfo() const
Definition: CGValue.h:335
void setNonGC(bool Value)
Definition: CGValue.h:304
Address getVectorAddress() const
Definition: CGValue.h:370
bool isNontemporal() const
Definition: CGValue.h:318
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition: CGValue.h:468
bool isObjCIvar() const
Definition: CGValue.h:297
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:442
void setAddress(Address address)
Definition: CGValue.h:363
void setBaseIvarExp(Expr *V)
Definition: CGValue.h:333
Address getExtVectorAddress() const
Definition: CGValue.h:401
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:488
Address getMatrixAddress() const
Definition: CGValue.h:387
Address getBitFieldAddress() const
Definition: CGValue.h:415
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
bool isScalar() const
Definition: CGValue.h:64
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition: CGValue.h:125
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:108
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:71
An abstract representation of an aligned address.
Definition: Address.h:42
RawAddress withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:100
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:77
llvm::Value * getPointer() const
Definition: Address.h:66
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:372
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition: TargetInfo.h:219
Complex values, per C99 6.2.5p11.
Definition: Type.h:3134
QualType getElementType() const
Definition: Type.h:3144
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3428
ConstStmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:195
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1077
Represents a concrete matrix type with constant number of rows and columns.
Definition: Type.h:4219
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition: Type.h:4237
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
Definition: DeclBase.cpp:2014
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition: Expr.h:1463
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition: Expr.cpp:488
ValueDecl * getDecl()
Definition: Expr.h:1333
SourceLocation getLocation() const
Definition: Expr.h:1341
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
T * getAttr() const
Definition: DeclBase.h:580
SourceLocation getLocation() const
Definition: DeclBase.h:446
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition: DeclBase.cpp:552
DeclContext * getDeclContext()
Definition: DeclBase.h:455
bool hasAttr() const
Definition: DeclBase.h:584
void ConvertArgToString(ArgumentKind Kind, intptr_t Val, StringRef Modifier, StringRef Argument, ArrayRef< ArgumentValue > PrevArgs, SmallVectorImpl< char > &Output, ArrayRef< intptr_t > QualTypeVals) const
Converts a diagnostic argument (as an intptr_t) into the string that represents it.
Definition: Diagnostic.h:880
Represents an enum.
Definition: Decl.h:3844
bool isFixed() const
Returns true if this is an Objective-C, C++11, or Microsoft-style enumeration with a fixed underlying...
Definition: Decl.h:4058
void getValueRange(llvm::APInt &Max, llvm::APInt &Min) const
Calculates the [Min,Max) values the enum can store based on the NumPositiveBits and NumNegativeBits.
Definition: Decl.cpp:4972
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Definition: Type.h:5991
EnumDecl * getDecl() const
Definition: Type.h:5998
ExplicitCastExpr - An explicit cast written in the source code.
Definition: Expr.h:3750
This represents one expression.
Definition: Expr.h:110
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition: Expr.cpp:82
bool isGLValue() const
Definition: Expr.h:280
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3097
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition: Expr.h:437
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition: Expr.cpp:3070
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3058
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3066
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known ...
bool isPRValue() const
Definition: Expr.h:278
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition: Expr.cpp:1545
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3567
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3050
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition: Expr.h:469
bool isFlexibleArrayMemberLike(ASTContext &Context, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution=false) const
Check whether this array fits the idiom of a flexible array member, depending on the value of -fstric...
Definition: Expr.cpp:206
QualType getType() const
Definition: Expr.h:142
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/ write barrier.
Definition: Expr.cpp:2981
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6305
Represents a member of a struct/union/class.
Definition: Decl.h:3030
bool isBitField() const
Determines whether this field is a bitfield.
Definition: Decl.h:3121
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.cpp:4630
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition: Decl.h:3247
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition: Decl.cpp:4681
FullExpr - Represents a "full-expression" node.
Definition: Expr.h:1044
const Expr * getSubExpr() const
Definition: Expr.h:1057
Represents a function declaration or definition.
Definition: Decl.h:1932
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3618
Represents a prototype with parameter type info, e.g.
Definition: Type.h:5002
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
Describes an C or C++ initializer list.
Definition: Expr.h:5039
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:482
virtual void mangleCXXRTTI(QualType T, raw_ostream &)=0
unsigned getBlockId(const BlockDecl *BD, bool Local)
Definition: Mangle.h:84
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4727
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition: ExprCXX.h:4752
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition: ExprCXX.h:4744
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition: ExprCXX.h:4777
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2752
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3187
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition: Expr.h:3270
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why? This is only meaningful if the named memb...
Definition: Expr.h:3411
Expr * getBase() const
Definition: Expr.h:3264
SourceLocation getExprLoc() const LLVM_READONLY
Definition: Expr.h:3382
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3508
bool isObjCBOOLType(QualType T) const
Returns true if.
Definition: NSAPI.cpp:481
This represents a decl that may have a name.
Definition: Decl.h:249
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:276
A C++ nested-name-specifier augmented with source location information.
bool containsType(SanitizerMask Mask, StringRef MangledTypeName, StringRef Category=StringRef()) const
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:410
Represents an ObjC class declaration.
Definition: DeclObjC.h:1153
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1951
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:945
Represents a class type in Objective C.
Definition: Type.h:7145
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1173
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1223
bool isUnique() const
Definition: Expr.h:1231
ParenExpr - This represents a parenthesized expression, e.g.
Definition: Expr.h:2135
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3187
QualType getPointeeType() const
Definition: Type.h:3197
[C99 6.4.2.2] - A predefined identifier such as func.
Definition: Expr.h:1991
StringRef getIdentKindName() const
Definition: Expr.h:2048
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
bool isValid() const
unsigned getLine() const
Return the presumed line number of this location.
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6497
const Expr *const * const_semantics_iterator
Definition: Expr.h:6562
A (possibly-)qualified type.
Definition: Type.h:941
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:7834
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:1008
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:7876
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:7790
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: Type.h:1444
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: Type.h:7844
QualType withCVRQualifiers(unsigned CVR) const
Definition: Type.h:1186
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition: Type.h:1542
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition: Type.h:1040
The collection of all-type qualifiers we support.
Definition: Type.h:319
unsigned getCVRQualifiers() const
Definition: Type.h:475
GC getObjCGCAttr() const
Definition: Type.h:506
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:348
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: Type.h:341
@ OCL_None
There is no lifetime qualification on this type.
Definition: Type.h:337
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: Type.h:351
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: Type.h:354
bool hasConst() const
Definition: Type.h:444
void addCVRQualifiers(unsigned mask)
Definition: Type.h:489
void removeObjCGCAttr()
Definition: Type.h:510
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition: Type.h:637
void setAddressSpace(LangAS space)
Definition: Type.h:578
bool hasVolatile() const
Definition: Type.h:454
ObjCLifetime getObjCLifetime() const
Definition: Type.h:532
void addVolatile()
Definition: Type.h:457
Represents a struct/union/class.
Definition: Decl.h:4145
field_range fields() const
Definition: Decl.h:4351
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition: Decl.h:4336
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:5965
decl_type * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Definition: Redeclarable.h:205
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4417
Stmt - This represents one statement.
Definition: Stmt.h:84
StmtClass getStmtClass() const
Definition: Stmt.h:1363
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:338
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1778
bool isUnion() const
Definition: Decl.h:3767
Exposes information about the current target.
Definition: TargetInfo.h:218
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1256
virtual StringRef getABI() const
Get the ABI currently in use.
Definition: TargetInfo.h:1324
const Type * getTypeForDecl() const
Definition: Decl.h:3391
The type-property cache.
Definition: Type.cpp:4437
The base class of the type hierarchy.
Definition: Type.h:1829
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1882
bool isBlockPointerType() const
Definition: Type.h:8017
bool isVoidType() const
Definition: Type.h:8319
bool isBooleanType() const
Definition: Type.h:8447
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition: Type.cpp:2167
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition: Type.cpp:1899
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition: Type.cpp:2146
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition: Type.h:8616
bool isArrayType() const
Definition: Type.h:8075
bool isFunctionPointerType() const
Definition: Type.h:8043
bool isCountAttributedType() const
Definition: Type.cpp:694
bool isArithmeticType() const
Definition: Type.cpp:2281
bool isConstantMatrixType() const
Definition: Type.h:8137
bool isPointerType() const
Definition: Type.h:8003
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:8359
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8607
bool isReferenceType() const
Definition: Type.h:8021
bool isVariableArrayType() const
Definition: Type.h:8087
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:705
bool isExtVectorBoolType() const
Definition: Type.h:8123
bool isBitIntType() const
Definition: Type.h:8241
bool isAnyComplexType() const
Definition: Type.h:8111
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition: Type.h:8490
bool isAtomicType() const
Definition: Type.h:8158
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2713
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition: Type.cpp:2362
bool isFunctionType() const
Definition: Type.h:7999
bool isObjCObjectPointerType() const
Definition: Type.h:8145
bool isVectorType() const
Definition: Type.h:8115
bool isFloatingType() const
Definition: Type.cpp:2249
bool isSubscriptableVectorType() const
Definition: Type.h:8129
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8540
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition: Type.cpp:605
bool isRecordType() const
Definition: Type.h:8103
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.cpp:1886
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2188
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4701
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:667
QualType getType() const
Definition: Decl.h:678
QualType getType() const
Definition: Value.cpp:234
Represents a variable declaration or definition.
Definition: Decl.h:879
TLSKind getTLSKind() const
Definition: Decl.cpp:2150
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition: Decl.cpp:2348
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition: Decl.h:1132
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition: Decl.h:905
@ TLS_None
Not a TLS variable.
Definition: Decl.h:899
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3795
Represents a GCC generic vector type.
Definition: Type.h:4021
unsigned getNumElements() const
Definition: Type.h:4036
#define INT_MIN
Definition: limits.h:55
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:141
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD)
isEmptyFieldForLayout - Return true iff the field is "empty", that is, either a zero-width bit-field ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
@ ARCImpreciseLifetime
Definition: CGValue.h:136
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition: CGValue.h:159
@ NotKnownNonNull
Definition: Address.h:33
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const AstTypeMatcher< FunctionType > functionType
Matches FunctionType nodes.
constexpr Variable var(Literal L)
Returns the variable of L.
Definition: CNFFormula.h:64
const void * Store
Store - This opaque type encapsulates an immutable mapping from locations to values.
Definition: StoreRef.h:27
bool This(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2269
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2243
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1666
bool Cast(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2052
The JSON file list parser is used to communicate input to InstallAPI.
@ OpenCL
Definition: LangStandard.h:66
@ CPlusPlus
Definition: LangStandard.h:56
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition: Specifiers.h:154
@ SC_Register
Definition: Specifiers.h:257
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition: Specifiers.h:327
@ SD_Thread
Thread storage duration.
Definition: Specifiers.h:330
@ SD_Static
Static storage duration.
Definition: Specifiers.h:331
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition: Specifiers.h:328
@ SD_Automatic
Automatic storage duration (most local variables).
Definition: Specifiers.h:329
@ SD_Dynamic
Dynamic storage duration.
Definition: Specifiers.h:332
@ Result
The result type of a method or function.
@ Dtor_Complete
Complete object dtor.
Definition: ABI.h:35
const FunctionProtoType * T
LangAS getLangASFromTargetAS(unsigned TargetAS)
Definition: AddressSpaces.h:86
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
@ Other
Other implicit parameter.
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition: Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition: Specifiers.h:180
unsigned long uint64_t
unsigned int uint32_t
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
Definition: CodeGenTBAA.h:104
uint64_t Offset
Offset - The byte offset of the final access within the base one.
Definition: CodeGenTBAA.h:108
static TBAAAccessInfo getMayAliasInfo()
Definition: CodeGenTBAA.h:62
uint64_t Size
Size - The size of access, in bytes.
Definition: CodeGenTBAA.h:111
llvm::MDNode * BaseType
BaseType - The base/leading access type.
Definition: CodeGenTBAA.h:100
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition: Expr.h:609
PointerAuthSchema FunctionPointers
The ABI for C function pointers.
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:168
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:159
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition: Expr.h:66