1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCUDARuntime.h"
14#include "CGCXXABI.h"
15#include "CGCall.h"
16#include "CGCleanup.h"
17#include "CGDebugInfo.h"
18#include "CGObjCRuntime.h"
19#include "CGOpenMPRuntime.h"
20#include "CGRecordLayout.h"
21#include "CodeGenFunction.h"
22#include "CodeGenModule.h"
23#include "ConstantEmitter.h"
24#include "TargetInfo.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/NSAPI.h"
33#include "llvm/ADT/Hashing.h"
34#include "llvm/ADT/STLExtras.h"
35#include "llvm/ADT/StringExtras.h"
36#include "llvm/IR/DataLayout.h"
37#include "llvm/IR/Intrinsics.h"
38#include "llvm/IR/IntrinsicsWebAssembly.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/MDBuilder.h"
41#include "llvm/IR/MatrixBuilder.h"
42#include "llvm/Passes/OptimizationLevel.h"
43#include "llvm/Support/ConvertUTF.h"
44#include "llvm/Support/MathExtras.h"
45#include "llvm/Support/Path.h"
46#include "llvm/Support/SaveAndRestore.h"
47#include "llvm/Support/xxhash.h"
48#include "llvm/Transforms/Utils/SanitizerStats.h"
49
50#include <optional>
51#include <string>
52
53using namespace clang;
54using namespace CodeGen;
55
56// Experiment to make sanitizers easier to debug
57static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
58 "ubsan-unique-traps", llvm::cl::Optional,
59 llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."));
60
61// TODO: Introduce frontend options to enable this per sanitizer, similar to
62// `-fsanitize-trap`.
63static llvm::cl::opt<bool> ClSanitizeGuardChecks(
64 "ubsan-guard-checks", llvm::cl::Optional,
65 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
66
67//===--------------------------------------------------------------------===//
68// Miscellaneous Helper Methods
69//===--------------------------------------------------------------------===//
70
71/// CreateTempAlloca - This creates an alloca and inserts it into the entry
72/// block.
73RawAddress
74CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
75 const Twine &Name,
76 llvm::Value *ArraySize) {
77 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
78 Alloca->setAlignment(Align.getAsAlign());
79 return RawAddress(Alloca, Ty, Align, KnownNonNull);
80}
81
82/// CreateTempAlloca - This creates an alloca and inserts it into the entry
83/// block. The alloca is cast to the default address space if necessary.
84RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
85 const Twine &Name,
86 llvm::Value *ArraySize,
87 RawAddress *AllocaAddr) {
88 auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
89 if (AllocaAddr)
90 *AllocaAddr = Alloca;
91 llvm::Value *V = Alloca.getPointer();
92 // Alloca always returns a pointer in alloca address space, which may
93 // be different from the type defined by the language. For example,
94 // in C++ the auto variables are in the default address space. Therefore
95 // cast alloca to the default address space when necessary.
96 if (getASTAllocaAddressSpace() != LangAS::Default) {
97 auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
98 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
99 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
100 // otherwise alloca is inserted at the current insertion point of the
101 // builder.
102 if (!ArraySize)
103 Builder.SetInsertPoint(getPostAllocaInsertPoint());
104 V = getTargetHooks().performAddrSpaceCast(
105 *this, V, getASTAllocaAddressSpace(), LangAS::Default,
106 Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
107 }
108
109 return RawAddress(V, Ty, Align, KnownNonNull);
110}
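// Illustrative sketch (an assumption for a concrete target, not taken from
// this file): on AMDGPU, where allocas live in the private address space
// (addrspace(5)) while LangAS::Default maps to the generic address space,
// the path above produces IR along the lines of
//
//   %tmp        = alloca i32, align 4, addrspace(5)
//   %tmp.ascast = addrspacecast ptr addrspace(5) %tmp to ptr
//
// so the rest of IRGen can keep working with a default-address-space pointer.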
111
112/// CreateTempAlloca - This creates an alloca and inserts it into the entry
113/// block if \p ArraySize is nullptr, otherwise inserts it at the current
114/// insertion point of the builder.
115llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
116 const Twine &Name,
117 llvm::Value *ArraySize) {
118 if (ArraySize)
119 return Builder.CreateAlloca(Ty, ArraySize, Name);
120 return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
121 ArraySize, Name, AllocaInsertPt);
122}
123
124/// CreateDefaultAlignTempAlloca - This creates an alloca with the
125/// default alignment of the corresponding LLVM type, which is *not*
126/// guaranteed to be related in any way to the expected alignment of
127/// an AST type that might have been lowered to Ty.
128RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
129 const Twine &Name) {
130 CharUnits Align =
131 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
132 return CreateTempAlloca(Ty, Align, Name);
133}
134
135RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
136 CharUnits Align = getContext().getTypeAlignInChars(Ty);
137 return CreateTempAlloca(ConvertType(Ty), Align, Name);
138}
139
141 RawAddress *Alloca) {
142 // FIXME: Should we prefer the preferred type alignment here?
143 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
144}
145
147 const Twine &Name,
148 RawAddress *Alloca) {
150 /*ArraySize=*/nullptr, Alloca);
151
152 if (Ty->isConstantMatrixType()) {
153 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
154 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
155 ArrayTy->getNumElements());
156
157 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
159 }
160 return Result;
161}
162
163RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
164 CharUnits Align,
165 const Twine &Name) {
166 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
167}
168
169RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
170 const Twine &Name) {
171 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
172 Name);
173}
174
175/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
176/// expression and compare the result against zero, returning an Int1Ty value.
177llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
178 PGO.setCurrentStmt(E);
179 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
180 llvm::Value *MemPtr = EmitScalarExpr(E);
181 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
182 }
183
184 QualType BoolTy = getContext().BoolTy;
185 SourceLocation Loc = E->getExprLoc();
186 CGFPOptionsRAII FPOptsRAII(*this, E);
187 if (!E->getType()->isAnyComplexType())
188 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
189
191 Loc);
192}
193
194/// EmitIgnoredExpr - Emit code to compute the specified expression,
195/// ignoring the result.
196void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
197 if (E->isPRValue())
198 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
199
200 // If this is a bitfield-resulting conditional operator, we can special-case
201 // emitting it. The normal 'EmitLValue' version of this is particularly
202 // difficult to codegen for, since creating a single "LValue" for two
203 // different sized arguments here is not particularly doable.
204 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
206 if (CondOp->getObjectKind() == OK_BitField)
207 return EmitIgnoredConditionalOperator(CondOp);
208 }
209
210 // Just emit it as an l-value and drop the result.
211 EmitLValue(E);
212}
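// Illustrative sketch of the special case above (the struct and condition are
// assumptions, not taken from this file):
//
//   struct S { int a : 3; int b : 5; } s;
//   cond ? s.a : s.b;   // bit-field glvalue whose value is ignored
//
// The two arms are differently sized bit-fields, so no single LValue can
// describe the result; EmitIgnoredConditionalOperator emits each arm
// separately instead.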
213
214/// EmitAnyExpr - Emit code to compute the specified expression which
215/// can have any type. The result is returned as an RValue struct.
216/// If this is an aggregate expression, AggSlot indicates where the
217/// result should be returned.
218RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
219 AggValueSlot aggSlot,
220 bool ignoreResult) {
221 switch (getEvaluationKind(E->getType())) {
222 case TEK_Scalar:
223 return RValue::get(EmitScalarExpr(E, ignoreResult));
224 case TEK_Complex:
225 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
226 case TEK_Aggregate:
227 if (!ignoreResult && aggSlot.isIgnored())
228 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
229 EmitAggExpr(E, aggSlot);
230 return aggSlot.asRValue();
231 }
232 llvm_unreachable("bad evaluation kind");
233}
234
235/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
236/// always be accessible even if no aggregate location is provided.
239
241 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
242 return EmitAnyExpr(E, AggSlot);
243}
244
245/// EmitAnyExprToMem - Evaluate an expression into a given memory
246/// location.
248 Address Location,
249 Qualifiers Quals,
250 bool IsInit) {
251 // FIXME: This function should take an LValue as an argument.
252 switch (getEvaluationKind(E->getType())) {
253 case TEK_Complex:
255 /*isInit*/ false);
256 return;
257
258 case TEK_Aggregate: {
259 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
264 return;
265 }
266
267 case TEK_Scalar: {
268 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
269 LValue LV = MakeAddrLValue(Location, E->getType());
271 return;
272 }
273 }
274 llvm_unreachable("bad evaluation kind");
275}
276
277static void
279 const Expr *E, Address ReferenceTemporary) {
280 // Objective-C++ ARC:
281 // If we are binding a reference to a temporary that has ownership, we
282 // need to perform retain/release operations on the temporary.
283 //
284 // FIXME: This should be looking at E, not M.
285 if (auto Lifetime = M->getType().getObjCLifetime()) {
286 switch (Lifetime) {
289 // Carry on to normal cleanup handling.
290 break;
291
293 // Nothing to do; cleaned up by an autorelease pool.
294 return;
295
298 switch (StorageDuration Duration = M->getStorageDuration()) {
299 case SD_Static:
300 // Note: we intentionally do not register a cleanup to release
301 // the object on program termination.
302 return;
303
304 case SD_Thread:
305 // FIXME: We should probably register a cleanup in this case.
306 return;
307
308 case SD_Automatic:
312 if (Lifetime == Qualifiers::OCL_Strong) {
313 const ValueDecl *VD = M->getExtendingDecl();
314 bool Precise =
315 VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
319 } else {
320 // __weak objects always get EH cleanups; otherwise, exceptions
321 // could cause really nasty crashes instead of mere leaks.
324 }
325 if (Duration == SD_FullExpression)
326 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
327 M->getType(), *Destroy,
329 else
330 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
331 M->getType(),
332 *Destroy, CleanupKind & EHCleanup);
333 return;
334
335 case SD_Dynamic:
336 llvm_unreachable("temporary cannot have dynamic storage duration");
337 }
338 llvm_unreachable("unknown storage duration");
339 }
340 }
341
342 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
343 if (const RecordType *RT =
345 // Get the destructor for the reference temporary.
346 auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
347 if (!ClassDecl->hasTrivialDestructor())
348 ReferenceTemporaryDtor = ClassDecl->getDestructor();
349 }
350
351 if (!ReferenceTemporaryDtor)
352 return;
353
354 // Call the destructor for the temporary.
355 switch (M->getStorageDuration()) {
356 case SD_Static:
357 case SD_Thread: {
358 llvm::FunctionCallee CleanupFn;
359 llvm::Constant *CleanupArg;
360 if (E->getType()->isArrayType()) {
362 ReferenceTemporary, E->getType(),
364 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
365 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
366 } else {
367 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
368 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
369 CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
370 }
372 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
373 break;
374 }
375
377 CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
379 CGF.getLangOpts().Exceptions);
380 break;
381
382 case SD_Automatic:
384 ReferenceTemporary, E->getType(),
386 CGF.getLangOpts().Exceptions);
387 break;
388
389 case SD_Dynamic:
390 llvm_unreachable("temporary cannot have dynamic storage duration");
391 }
392}
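// Illustrative sketch (the type and initializer are assumptions, not taken
// from this file): for
//
//   const Foo &R = makeFoo();
//
// the temporary bound to R has SD_Automatic storage duration, so the
// lifetime-extended destroy pushed above runs ~Foo() when R's enclosing
// scope ends rather than at the end of the full-expression.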
393
396 const Expr *Inner,
397 RawAddress *Alloca = nullptr) {
398 auto &TCG = CGF.getTargetHooks();
399 switch (M->getStorageDuration()) {
401 case SD_Automatic: {
402 // If we have a constant temporary array or record try to promote it into a
403 // constant global under the same rules a normal constant would've been
404 // promoted. This is easier on the optimizer and generally emits fewer
405 // instructions.
406 QualType Ty = Inner->getType();
407 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
408 (Ty->isArrayType() || Ty->isRecordType()) &&
409 Ty.isConstantStorage(CGF.getContext(), true, false))
410 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
411 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
412 auto *GV = new llvm::GlobalVariable(
413 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
414 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
415 llvm::GlobalValue::NotThreadLocal,
417 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
418 GV->setAlignment(alignment.getAsAlign());
419 llvm::Constant *C = GV;
420 if (AS != LangAS::Default)
421 C = TCG.performAddrSpaceCast(
422 CGF.CGM, GV, AS, LangAS::Default,
423 GV->getValueType()->getPointerTo(
425 // FIXME: Should we put the new global into a COMDAT?
426 return RawAddress(C, GV->getValueType(), alignment);
427 }
428 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
429 }
430 case SD_Thread:
431 case SD_Static:
432 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
433
434 case SD_Dynamic:
435 llvm_unreachable("temporary can't have dynamic storage duration");
436 }
437 llvm_unreachable("unknown storage duration");
438}
439
440/// Helper method to check if the underlying ABI is AAPCS
441static bool isAAPCS(const TargetInfo &TargetInfo) {
442 return TargetInfo.getABI().starts_with("aapcs");
443}
444
447 const Expr *E = M->getSubExpr();
448
449 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
450 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
451 "Reference should never be pseudo-strong!");
452
453 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
454 // as that will cause the lifetime adjustment to be lost for ARC
455 auto ownership = M->getType().getObjCLifetime();
456 if (ownership != Qualifiers::OCL_None &&
457 ownership != Qualifiers::OCL_ExplicitNone) {
459 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
460 llvm::Type *Ty = ConvertTypeForMem(E->getType());
461 Object = Object.withElementType(Ty);
462
463 // createReferenceTemporary will promote the temporary to a global with a
464 // constant initializer if it can. It can only do this to a value of
465 // ARC-manageable type if the value is global and therefore "immune" to
466 // ref-counting operations. Therefore we have no need to emit either a
467 // dynamic initialization or a cleanup and we can just return the address
468 // of the temporary.
469 if (Var->hasInitializer())
470 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
471
472 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
473 }
474 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
476
477 switch (getEvaluationKind(E->getType())) {
478 default: llvm_unreachable("expected scalar or aggregate expression");
479 case TEK_Scalar:
480 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
481 break;
482 case TEK_Aggregate: {
484 E->getType().getQualifiers(),
489 break;
490 }
491 }
492
493 pushTemporaryCleanup(*this, M, E, Object);
494 return RefTempDst;
495 }
496
499 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
500
501 for (const auto &Ignored : CommaLHSs)
502 EmitIgnoredExpr(Ignored);
503
504 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
505 if (opaque->getType()->isRecordType()) {
506 assert(Adjustments.empty());
507 return EmitOpaqueValueLValue(opaque);
508 }
509 }
510
511 // Create and initialize the reference temporary.
512 RawAddress Alloca = Address::invalid();
513 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
514 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
515 Object.getPointer()->stripPointerCasts())) {
516 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
517 Object = Object.withElementType(TemporaryType);
518 // If the temporary is a global and has a constant initializer or is a
519 // constant temporary that we promoted to a global, we may have already
520 // initialized it.
521 if (!Var->hasInitializer()) {
522 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
523 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
524 }
525 } else {
526 switch (M->getStorageDuration()) {
527 case SD_Automatic:
528 if (auto *Size = EmitLifetimeStart(
529 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
530 Alloca.getPointer())) {
531 pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
532 Alloca, Size);
533 }
534 break;
535
536 case SD_FullExpression: {
537 if (!ShouldEmitLifetimeMarkers)
538 break;
539
540 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
541 // marker. Instead, start the lifetime of a conditional temporary earlier
542 // so that it's unconditional. Don't do this with sanitizers which need
543 // more precise lifetime marks. However when inside an "await.suspend"
544 // block, we should always avoid conditional cleanup because it creates a
545 // boolean marker that lives across await_suspend, which can destroy the
546 // coro frame.
547 ConditionalEvaluation *OldConditional = nullptr;
548 CGBuilderTy::InsertPoint OldIP;
550 ((!SanOpts.has(SanitizerKind::HWAddress) &&
551 !SanOpts.has(SanitizerKind::Memory) &&
552 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
553 inSuspendBlock())) {
554 OldConditional = OutermostConditional;
555 OutermostConditional = nullptr;
556
557 OldIP = Builder.saveIP();
558 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
559 Builder.restoreIP(CGBuilderTy::InsertPoint(
560 Block, llvm::BasicBlock::iterator(Block->back())));
561 }
562
563 if (auto *Size = EmitLifetimeStart(
564 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
565 Alloca.getPointer())) {
566 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
567 Size);
568 }
569
570 if (OldConditional) {
571 OutermostConditional = OldConditional;
572 Builder.restoreIP(OldIP);
573 }
574 break;
575 }
576
577 default:
578 break;
579 }
580 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
581 }
582 pushTemporaryCleanup(*this, M, E, Object);
583
584 // Perform derived-to-base casts and/or field accesses, to get from the
585 // temporary object we created (and, potentially, for which we extended
586 // the lifetime) to the subobject we're binding the reference to.
587 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
588 switch (Adjustment.Kind) {
590 Object =
591 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
592 Adjustment.DerivedToBase.BasePath->path_begin(),
593 Adjustment.DerivedToBase.BasePath->path_end(),
594 /*NullCheckValue=*/ false, E->getExprLoc());
595 break;
596
599 LV = EmitLValueForField(LV, Adjustment.Field);
600 assert(LV.isSimple() &&
601 "materialized temporary field is not a simple lvalue");
602 Object = LV.getAddress(*this);
603 break;
604 }
605
607 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
609 Adjustment.Ptr.MPT);
610 break;
611 }
612 }
613 }
614
615 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
616}
617
618RValue
620 // Emit the expression as an lvalue.
621 LValue LV = EmitLValue(E);
622 assert(LV.isSimple());
623 llvm::Value *Value = LV.getPointer(*this);
624
626 // C++11 [dcl.ref]p5 (as amended by core issue 453):
627 // If a glvalue to which a reference is directly bound designates neither
628 // an existing object or function of an appropriate type nor a region of
629 // storage of suitable size and alignment to contain an object of the
630 // reference's type, the behavior is undefined.
631 QualType Ty = E->getType();
633 }
634
635 return RValue::get(Value);
636}
637
638
639/// getAccessedFieldNo - Given an encoded value and a result number, return the
640/// input field number being accessed.
641unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
642 const llvm::Constant *Elts) {
643 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
644 ->getZExtValue();
645}
646
647/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
648static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
649 llvm::Value *High) {
650 llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
651 llvm::Value *K47 = Builder.getInt64(47);
652 llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
653 llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
654 llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
655 llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
656 return Builder.CreateMul(B1, KMul);
657}
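// Minimal standalone sketch of the computation emitted above; the helper name
// is hypothetical and the function is illustrative only. It mirrors
// hash_16_bytes from llvm/ADT/Hashing.h using plain 64-bit arithmetic.
[[maybe_unused]] static uint64_t hash16BytesReference(uint64_t Low,
                                                      uint64_t High) {
  const uint64_t KMul = 0x9ddfea08eb382d69ULL;
  uint64_t A = (Low ^ High) * KMul; // mix the two halves
  A ^= (A >> 47);
  uint64_t B = (High ^ A) * KMul;   // fold the high half back in
  B ^= (B >> 47);
  return B * KMul;
}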
658
659bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
660 return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
661 TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
662}
663
664bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
666 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
667 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
670}
671
672bool CodeGenFunction::sanitizePerformTypeCheck() const {
673 return SanOpts.has(SanitizerKind::Null) ||
674 SanOpts.has(SanitizerKind::Alignment) ||
675 SanOpts.has(SanitizerKind::ObjectSize) ||
676 SanOpts.has(SanitizerKind::Vptr);
677}
678
679void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
680 llvm::Value *Ptr, QualType Ty,
681 CharUnits Alignment,
682 SanitizerSet SkippedChecks,
683 llvm::Value *ArraySize) {
684 if (!sanitizePerformTypeCheck())
685 return;
686
687 // Don't check pointers outside the default address space. The null check
688 // isn't correct, the object-size check isn't supported by LLVM, and we can't
689 // communicate the addresses to the runtime handler for the vptr check.
690 if (Ptr->getType()->getPointerAddressSpace())
691 return;
692
693 // Don't check pointers to volatile data. The behavior here is implementation-
694 // defined.
695 if (Ty.isVolatileQualified())
696 return;
697
698 SanitizerScope SanScope(this);
699
701 llvm::BasicBlock *Done = nullptr;
702
703 // Quickly determine whether we have a pointer to an alloca. It's possible
704 // to skip null checks, and some alignment checks, for these pointers. This
705 // can reduce compile-time significantly.
706 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
707
708 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
709 llvm::Value *IsNonNull = nullptr;
710 bool IsGuaranteedNonNull =
711 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
712 bool AllowNullPointers = isNullPointerAllowed(TCK);
713 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
714 !IsGuaranteedNonNull) {
715 // The glvalue must not be an empty glvalue.
716 IsNonNull = Builder.CreateIsNotNull(Ptr);
717
718 // The IR builder can constant-fold the null check if the pointer points to
719 // a constant.
720 IsGuaranteedNonNull = IsNonNull == True;
721
722 // Skip the null check if the pointer is known to be non-null.
723 if (!IsGuaranteedNonNull) {
724 if (AllowNullPointers) {
725 // When performing pointer casts, it's OK if the value is null.
726 // Skip the remaining checks in that case.
727 Done = createBasicBlock("null");
728 llvm::BasicBlock *Rest = createBasicBlock("not.null");
729 Builder.CreateCondBr(IsNonNull, Rest, Done);
730 EmitBlock(Rest);
731 } else {
732 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
733 }
734 }
735 }
736
737 if (SanOpts.has(SanitizerKind::ObjectSize) &&
738 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
739 !Ty->isIncompleteType()) {
741 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
742 if (ArraySize)
743 Size = Builder.CreateMul(Size, ArraySize);
744
745 // Degenerate case: new X[0] does not need an objectsize check.
746 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
747 if (!ConstantSize || !ConstantSize->isNullValue()) {
748 // The glvalue must refer to a large enough storage region.
749 // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
750 // to check this.
751 // FIXME: Get object address space
752 llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
753 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
754 llvm::Value *Min = Builder.getFalse();
755 llvm::Value *NullIsUnknown = Builder.getFalse();
756 llvm::Value *Dynamic = Builder.getFalse();
757 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
758 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
759 Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
760 }
761 }
762
763 llvm::MaybeAlign AlignVal;
764 llvm::Value *PtrAsInt = nullptr;
765
766 if (SanOpts.has(SanitizerKind::Alignment) &&
767 !SkippedChecks.has(SanitizerKind::Alignment)) {
768 AlignVal = Alignment.getAsMaybeAlign();
769 if (!Ty->isIncompleteType() && !AlignVal)
770 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
771 /*ForPointeeType=*/true)
773
774 // The glvalue must be suitably aligned.
775 if (AlignVal && *AlignVal > llvm::Align(1) &&
776 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
777 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
778 llvm::Value *Align = Builder.CreateAnd(
779 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
780 llvm::Value *Aligned =
781 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
782 if (Aligned != True)
783 Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
784 }
785 }
786
787 if (Checks.size() > 0) {
788 llvm::Constant *StaticData[] = {
790 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
791 llvm::ConstantInt::get(Int8Ty, TCK)};
792 EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
793 PtrAsInt ? PtrAsInt : Ptr);
794 }
795
796 // If possible, check that the vptr indicates that there is a subobject of
797 // type Ty at offset zero within this object.
798 //
799 // C++11 [basic.life]p5,6:
800 // [For storage which does not refer to an object within its lifetime]
801 // The program has undefined behavior if:
802 // -- the [pointer or glvalue] is used to access a non-static data member
803 // or call a non-static member function
804 if (SanOpts.has(SanitizerKind::Vptr) &&
805 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
806 // Ensure that the pointer is non-null before loading it. If there is no
807 // compile-time guarantee, reuse the run-time null check or emit a new one.
808 if (!IsGuaranteedNonNull) {
809 if (!IsNonNull)
810 IsNonNull = Builder.CreateIsNotNull(Ptr);
811 if (!Done)
812 Done = createBasicBlock("vptr.null");
813 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
814 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
815 EmitBlock(VptrNotNull);
816 }
817
818 // Compute a hash of the mangled name of the type.
819 //
820 // FIXME: This is not guaranteed to be deterministic! Move to a
821 // fingerprinting mechanism once LLVM provides one. For the time
822 // being the implementation happens to be deterministic.
823 SmallString<64> MangledName;
824 llvm::raw_svector_ostream Out(MangledName);
826 Out);
827
828 // Contained in NoSanitizeList based on the mangled type.
829 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
830 Out.str())) {
831 llvm::hash_code TypeHash = hash_value(Out.str());
832
833 // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
834 llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
835 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
836 llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
837 llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
838
839 llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
840 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
841
842 // Look the hash up in our cache.
843 const int CacheSize = 128;
844 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
845 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
846 "__ubsan_vptr_type_cache");
847 llvm::Value *Slot = Builder.CreateAnd(Hash,
848 llvm::ConstantInt::get(IntPtrTy,
849 CacheSize-1));
850 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
851 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
852 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
854
855 // If the hash isn't in the cache, call a runtime handler to perform the
856 // hard work of checking whether the vptr is for an object of the right
857 // type. This will either fill in the cache and return, or produce a
858 // diagnostic.
859 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
860 llvm::Constant *StaticData[] = {
864 llvm::ConstantInt::get(Int8Ty, TCK)
865 };
866 llvm::Value *DynamicData[] = { Ptr, Hash };
867 EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
868 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
869 DynamicData);
870 }
871 }
872
873 if (Done) {
874 Builder.CreateBr(Done);
875 EmitBlock(Done);
876 }
877}
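// Conceptually (a sketch; the pseudocode below is an illustration, not the
// exact emitted IR), the Vptr check above behaves like:
//
//   uintptr_t Hash = hash_16_bytes(TypeHash, *(uintptr_t *)Ptr);
//   if (__ubsan_vptr_type_cache[Hash & (CacheSize - 1)] != Hash)
//     // slow path: the runtime handler either fills the cache slot or
//     // diagnoses the dynamic type mismatch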
878
879llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
880 QualType EltTy) {
882 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
883 if (!EltSize)
884 return nullptr;
885
886 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
887 if (!ArrayDeclRef)
888 return nullptr;
889
890 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
891 if (!ParamDecl)
892 return nullptr;
893
894 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
895 if (!POSAttr)
896 return nullptr;
897
898 // Don't load the size if it's a lower bound.
899 int POSType = POSAttr->getType();
900 if (POSType != 0 && POSType != 1)
901 return nullptr;
902
903 // Find the implicit size parameter.
904 auto PassedSizeIt = SizeArguments.find(ParamDecl);
905 if (PassedSizeIt == SizeArguments.end())
906 return nullptr;
907
908 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
909 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
910 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
911 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
912 C.getSizeType(), E->getExprLoc());
913 llvm::Value *SizeOfElement =
914 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
915 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
916}
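// Illustrative sketch (the declaration is an assumption, not taken from this
// file): given
//
//   void fill(char *Buf __attribute__((pass_object_size(0))));
//
// the implicit size argument passed alongside Buf is measured in bytes, and
// the division above converts it into an element count usable as an
// array-indexing bound.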
917
918/// If Base is known to point to the start of an array, return the length of
919/// that array. Return 0 if the length cannot be determined.
920static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
921 const Expr *Base,
922 QualType &IndexedType,
924 StrictFlexArraysLevel) {
925 // For the vector indexing extension, the bound is the number of elements.
926 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
927 IndexedType = Base->getType();
928 return CGF.Builder.getInt32(VT->getNumElements());
929 }
930
931 Base = Base->IgnoreParens();
932
933 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
934 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
935 !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
936 StrictFlexArraysLevel)) {
937 CodeGenFunction::SanitizerScope SanScope(&CGF);
938
939 IndexedType = CE->getSubExpr()->getType();
940 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
941 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
942 return CGF.Builder.getInt(CAT->getSize());
943
944 if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
945 return CGF.getVLASize(VAT).NumElts;
946 // Ignore pass_object_size here. It's not applicable on decayed pointers.
947 }
948 }
949
950 CodeGenFunction::SanitizerScope SanScope(&CGF);
951
952 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
953 if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
954 IndexedType = Base->getType();
955 return POS;
956 }
957
958 return nullptr;
959}
960
961namespace {
962
963/// \p StructAccessBase returns the base \p Expr of a field access. It returns
964/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
965///
966/// p in p-> a.b.c
967///
968/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
969/// looking for:
970///
971/// struct s {
972/// struct s *ptr;
973/// int count;
974/// char array[] __attribute__((counted_by(count)));
975/// };
976///
977/// If we have an expression like \p p->ptr->array[index], we want the
978/// \p MemberExpr for \p p->ptr instead of \p p.
979class StructAccessBase
980 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
981 const RecordDecl *ExpectedRD;
982
983 bool IsExpectedRecordDecl(const Expr *E) const {
984 QualType Ty = E->getType();
985 if (Ty->isPointerType())
986 Ty = Ty->getPointeeType();
987 return ExpectedRD == Ty->getAsRecordDecl();
988 }
989
990public:
991 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
992
993 //===--------------------------------------------------------------------===//
994 // Visitor Methods
995 //===--------------------------------------------------------------------===//
996
997 // NOTE: If we build C++ support for counted_by, then we'll have to handle
998 // horrors like this:
999 //
1000 // struct S {
1001 // int x, y;
1002 // int blah[] __attribute__((counted_by(x)));
1003 // } s;
1004 //
1005 // int foo(int index, int val) {
1006 // int (S::*IHatePMDs)[] = &S::blah;
1007 // (s.*IHatePMDs)[index] = val;
1008 // }
1009
1010 const Expr *Visit(const Expr *E) {
1012 }
1013
1014 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1015
1016 // These are the types we expect to return (in order of most to least
1017 // likely):
1018 //
1019 // 1. DeclRefExpr - This is the expression for the base of the structure.
1020 // It's exactly what we want to build an access to the \p counted_by
1021 // field.
1022 // 2. MemberExpr - This is the expression that has the same \p RecordDecl
1023 // as the flexible array member's lexical enclosing \p RecordDecl. This
1024 // allows us to catch things like: "p->p->array"
1025 // 3. CompoundLiteralExpr - This is for people who create something
1026 // heretical like (struct foo has a flexible array member):
1027 //
1028 // (struct foo){ 1, 2 }.blah[idx];
1029 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1030 return IsExpectedRecordDecl(E) ? E : nullptr;
1031 }
1032 const Expr *VisitMemberExpr(const MemberExpr *E) {
1033 if (IsExpectedRecordDecl(E) && E->isArrow())
1034 return E;
1035 const Expr *Res = Visit(E->getBase());
1036 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1037 }
1038 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1039 return IsExpectedRecordDecl(E) ? E : nullptr;
1040 }
1041 const Expr *VisitCallExpr(const CallExpr *E) {
1042 return IsExpectedRecordDecl(E) ? E : nullptr;
1043 }
1044
1045 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1046 if (IsExpectedRecordDecl(E))
1047 return E;
1048 return Visit(E->getBase());
1049 }
1050 const Expr *VisitCastExpr(const CastExpr *E) {
1051 return Visit(E->getSubExpr());
1052 }
1053 const Expr *VisitParenExpr(const ParenExpr *E) {
1054 return Visit(E->getSubExpr());
1055 }
1056 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1057 return Visit(E->getSubExpr());
1058 }
1059 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1060 return Visit(E->getSubExpr());
1061 }
1062};
1063
1064} // end anonymous namespace
1065
1068
1070 const FieldDecl *FD, RecIndicesTy &Indices) {
1071 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1072 int64_t FieldNo = -1;
1073 for (const Decl *D : RD->decls()) {
1074 if (const auto *Field = dyn_cast<FieldDecl>(D)) {
1075 FieldNo = Layout.getLLVMFieldNo(Field);
1076 if (FD == Field) {
1077 Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
1078 return true;
1079 }
1080 }
1081
1082 if (const auto *Record = dyn_cast<RecordDecl>(D)) {
1083 ++FieldNo;
1084 if (getGEPIndicesToField(CGF, Record, FD, Indices)) {
1085 if (RD->isUnion())
1086 FieldNo = 0;
1087 Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
1088 return true;
1089 }
1090 }
1091 }
1092
1093 return false;
1094}
1095
1096/// This method is typically called in contexts where we can't generate
1097/// side-effects, like in __builtin_dynamic_object_size. When finding
1098/// expressions, only choose those that have either already been emitted or can
1099/// be loaded without side-effects.
1100///
1101/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1102/// within the top-level struct.
1103/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1105 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1106 const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
1107
1108 // Find the base struct expr (i.e. p in p->a.b.c.d).
1109 const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1110 if (!StructBase || StructBase->HasSideEffects(getContext()))
1111 return nullptr;
1112
1113 llvm::Value *Res = nullptr;
1114 if (const auto *DRE = dyn_cast<DeclRefExpr>(StructBase)) {
1115 Res = EmitDeclRefLValue(DRE).getPointer(*this);
1116 Res = Builder.CreateAlignedLoad(ConvertType(DRE->getType()), Res,
1117 getPointerAlign(), "dre.load");
1118 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
1119 LValue LV = EmitMemberExpr(ME);
1120 Address Addr = LV.getAddress(*this);
1121 Res = Addr.emitRawPointer(*this);
1122 } else if (StructBase->getType()->isPointerType()) {
1123 LValueBaseInfo BaseInfo;
1124 TBAAAccessInfo TBAAInfo;
1125 Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1126 Res = Addr.emitRawPointer(*this);
1127 } else {
1128 return nullptr;
1129 }
1130
1131 llvm::Value *Zero = Builder.getInt32(0);
1132 RecIndicesTy Indices;
1133
1134 getGEPIndicesToField(*this, RD, CountDecl, Indices);
1135
1136 for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
1138 ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res,
1139 {Zero, I->second}, "..counted_by.gep");
1140
1141 return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
1142 getIntAlign(), "..counted_by.load");
1143}
1144
1146 if (!FD)
1147 return nullptr;
1148
1149 const auto *CAT = FD->getType()->getAs<CountAttributedType>();
1150 if (!CAT)
1151 return nullptr;
1152
1153 const auto *CountDRE = cast<DeclRefExpr>(CAT->getCountExpr());
1154 const auto *CountDecl = CountDRE->getDecl();
1155 if (const auto *IFD = dyn_cast<IndirectFieldDecl>(CountDecl))
1156 CountDecl = IFD->getAnonField();
1157
1158 return dyn_cast<FieldDecl>(CountDecl);
1159}
1160
1161void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1162 llvm::Value *Index, QualType IndexType,
1163 bool Accessed) {
1164 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1165 "should not be called unless adding bounds checks");
1166 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1167 getLangOpts().getStrictFlexArraysLevel();
1168 QualType IndexedType;
1169 llvm::Value *Bound =
1170 getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1171
1172 EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1173}
1174
1175void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
1176 llvm::Value *Index,
1177 QualType IndexType,
1178 QualType IndexedType, bool Accessed) {
1179 if (!Bound)
1180 return;
1181
1182 SanitizerScope SanScope(this);
1183
1184 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1185 llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1186 llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1187
1188 llvm::Constant *StaticData[] = {
1190 EmitCheckTypeDescriptor(IndexedType),
1191 EmitCheckTypeDescriptor(IndexType)
1192 };
1193 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1194 : Builder.CreateICmpULE(IndexVal, BoundVal);
1195 EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
1196 SanitizerHandler::OutOfBounds, StaticData, Index);
1197}
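// Illustrative sketch of the Accessed distinction above (the array and index
// are assumptions, not taken from this file): with -fsanitize=array-bounds,
//
//   int A[4];
//   A[I] = 0;        // actual access: checked with ULT, I must be < 4
//   int *P = &A[I];  // address only: checked with ULE, I == 4 (one past
//                    // the end) is still allowed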
1198
1201 bool isInc, bool isPre) {
1202 ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1203
1204 llvm::Value *NextVal;
1205 if (isa<llvm::IntegerType>(InVal.first->getType())) {
1206 uint64_t AmountVal = isInc ? 1 : -1;
1207 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1208
1209 // Add the inc/dec to the real part.
1210 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1211 } else {
1212 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1213 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1214 if (!isInc)
1215 FVal.changeSign();
1216 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1217
1218 // Add the inc/dec to the real part.
1219 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1220 }
1221
1222 ComplexPairTy IncVal(NextVal, InVal.second);
1223
1224 // Store the updated result through the lvalue.
1225 EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1226 if (getLangOpts().OpenMP)
1228 E->getSubExpr());
1229
1230 // If this is a postinc, return the value read from memory, otherwise use the
1231 // updated value.
1232 return isPre ? IncVal : InVal;
1233}
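// Illustrative sketch (the values are assumptions, not taken from this file):
//
//   _Complex double Z = 1.0 + 2.0i;
//   ++Z;   // Z becomes 2.0 + 2.0i: only the real part is incremented; the
//          // imaginary part is carried through unchanged.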
1234
1236 CodeGenFunction *CGF) {
1237 // Bind VLAs in the cast type.
1238 if (CGF && E->getType()->isVariablyModifiedType())
1240
1241 if (CGDebugInfo *DI = getModuleDebugInfo())
1242 DI->EmitExplicitCastType(E->getType());
1243}
1244
1245//===----------------------------------------------------------------------===//
1246// LValue Expression Emission
1247//===----------------------------------------------------------------------===//
1248
1250 TBAAAccessInfo *TBAAInfo,
1251 KnownNonNull_t IsKnownNonNull,
1252 CodeGenFunction &CGF) {
1253 // We allow this with ObjC object pointers because of fragile ABIs.
1254 assert(E->getType()->isPointerType() ||
1256 E = E->IgnoreParens();
1257
1258 // Casts:
1259 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1260 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1261 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1262
1263 switch (CE->getCastKind()) {
1264 // Non-converting casts (but not C's implicit conversion from void*).
1265 case CK_BitCast:
1266 case CK_NoOp:
1267 case CK_AddressSpaceConversion:
1268 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1269 if (PtrTy->getPointeeType()->isVoidType())
1270 break;
1271
1272 LValueBaseInfo InnerBaseInfo;
1273 TBAAAccessInfo InnerTBAAInfo;
1275 CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1276 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1277 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1278
1279 if (isa<ExplicitCastExpr>(CE)) {
1280 LValueBaseInfo TargetTypeBaseInfo;
1281 TBAAAccessInfo TargetTypeTBAAInfo;
1283 E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1284 if (TBAAInfo)
1285 *TBAAInfo =
1286 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1287 // If the source l-value is opaque, honor the alignment of the
1288 // casted-to type.
1289 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1290 if (BaseInfo)
1291 BaseInfo->mergeForCast(TargetTypeBaseInfo);
1292 Addr.setAlignment(Align);
1293 }
1294 }
1295
1296 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1297 CE->getCastKind() == CK_BitCast) {
1298 if (auto PT = E->getType()->getAs<PointerType>())
1299 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1300 /*MayBeNull=*/true,
1302 CE->getBeginLoc());
1303 }
1304
1305 llvm::Type *ElemTy =
1307 Addr = Addr.withElementType(ElemTy);
1308 if (CE->getCastKind() == CK_AddressSpaceConversion)
1309 Addr = CGF.Builder.CreateAddrSpaceCast(
1310 Addr, CGF.ConvertType(E->getType()), ElemTy);
1311 return Addr;
1312 }
1313 break;
1314
1315 // Array-to-pointer decay.
1316 case CK_ArrayToPointerDecay:
1317 return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1318
1319 // Derived-to-base conversions.
1320 case CK_UncheckedDerivedToBase:
1321 case CK_DerivedToBase: {
1322 // TODO: Support accesses to members of base classes in TBAA. For now, we
1323 // conservatively pretend that the complete object is of the base class
1324 // type.
1325 if (TBAAInfo)
1326 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1328 CE->getSubExpr(), BaseInfo, nullptr,
1329 (KnownNonNull_t)(IsKnownNonNull ||
1330 CE->getCastKind() == CK_UncheckedDerivedToBase));
1331 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1332 return CGF.GetAddressOfBaseClass(
1333 Addr, Derived, CE->path_begin(), CE->path_end(),
1334 CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1335 }
1336
1337 // TODO: Is there any reason to treat base-to-derived conversions
1338 // specially?
1339 default:
1340 break;
1341 }
1342 }
1343
1344 // Unary &.
1345 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1346 if (UO->getOpcode() == UO_AddrOf) {
1347 LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1348 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1349 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1350 return LV.getAddress(CGF);
1351 }
1352 }
1353
1354 // std::addressof and variants.
1355 if (auto *Call = dyn_cast<CallExpr>(E)) {
1356 switch (Call->getBuiltinCallee()) {
1357 default:
1358 break;
1359 case Builtin::BIaddressof:
1360 case Builtin::BI__addressof:
1361 case Builtin::BI__builtin_addressof: {
1362 LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1363 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1364 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1365 return LV.getAddress(CGF);
1366 }
1367 }
1368 }
1369
1370 // TODO: conditional operators, comma.
1371
1372 // Otherwise, use the alignment of the type.
1375 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1376}
1377
1378/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1379/// derive a more accurate bound on the alignment of the pointer.
1381 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1382 KnownNonNull_t IsKnownNonNull) {
1383 Address Addr =
1384 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1385 if (IsKnownNonNull && !Addr.isKnownNonNull())
1386 Addr.setKnownNonNull();
1387 return Addr;
1388}
1389
1391 llvm::Value *V = RV.getScalarVal();
1392 if (auto MPT = T->getAs<MemberPointerType>())
1393 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1394 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1395}
1396
1398 if (Ty->isVoidType())
1399 return RValue::get(nullptr);
1400
1401 switch (getEvaluationKind(Ty)) {
1402 case TEK_Complex: {
1403 llvm::Type *EltTy =
1405 llvm::Value *U = llvm::UndefValue::get(EltTy);
1406 return RValue::getComplex(std::make_pair(U, U));
1407 }
1408
1409 // If this is a use of an undefined aggregate type, the aggregate must have an
1410 // identifiable address. Just because the contents of the value are undefined
1411 // doesn't mean that the address can't be taken and compared.
1412 case TEK_Aggregate: {
1413 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1414 return RValue::getAggregate(DestPtr);
1415 }
1416
1417 case TEK_Scalar:
1418 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1419 }
1420 llvm_unreachable("bad evaluation kind");
1421}
1422
1424 const char *Name) {
1425 ErrorUnsupported(E, Name);
1426 return GetUndefRValue(E->getType());
1427}
1428
1430 const char *Name) {
1431 ErrorUnsupported(E, Name);
1432 llvm::Type *ElTy = ConvertType(E->getType());
1433 llvm::Type *Ty = UnqualPtrTy;
1434 return MakeAddrLValue(
1435 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1436}
1437
1438bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1439 const Expr *Base = Obj;
1440 while (!isa<CXXThisExpr>(Base)) {
1441 // The result of a dynamic_cast can be null.
1442 if (isa<CXXDynamicCastExpr>(Base))
1443 return false;
1444
1445 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1446 Base = CE->getSubExpr();
1447 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1448 Base = PE->getSubExpr();
1449 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1450 if (UO->getOpcode() == UO_Extension)
1451 Base = UO->getSubExpr();
1452 else
1453 return false;
1454 } else {
1455 return false;
1456 }
1457 }
1458 return true;
1459}
1460
1461LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1462 LValue LV;
1463 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1464 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1465 else
1466 LV = EmitLValue(E);
1467 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1468 SanitizerSet SkippedChecks;
1469 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1470 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1471 if (IsBaseCXXThis)
1472 SkippedChecks.set(SanitizerKind::Alignment, true);
1473 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1474 SkippedChecks.set(SanitizerKind::Null, true);
1475 }
1476 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1477 }
1478 return LV;
1479}
1480
1481/// EmitLValue - Emit code to compute a designator that specifies the location
1482/// of the expression.
1483///
1484/// This can return one of two things: a simple address or a bitfield reference.
1485/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1486/// an LLVM pointer type.
1487///
1488/// If this returns a bitfield reference, nothing about the pointee type of the
1489/// LLVM value is known: For example, it may not be a pointer to an integer.
1490///
1491/// If this returns a normal address, and if the lvalue's C type is fixed size,
1492/// this method guarantees that the returned pointer type will point to an LLVM
1493/// type of the same size of the lvalue's type. If the lvalue has a variable
1494/// length type, this is not possible.
1495///
1497 KnownNonNull_t IsKnownNonNull) {
1498 LValue LV = EmitLValueHelper(E, IsKnownNonNull);
1499 if (IsKnownNonNull && !LV.isKnownNonNull())
1500 LV.setKnownNonNull();
1501 return LV;
1502}
1503
1505 const ASTContext &Ctx) {
1506 const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1507 if (isa<OpaqueValueExpr>(SE))
1508 return SE->getType();
1509 return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
1510}
1511
1512LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1513 KnownNonNull_t IsKnownNonNull) {
1514 ApplyDebugLocation DL(*this, E);
1515 switch (E->getStmtClass()) {
1516 default: return EmitUnsupportedLValue(E, "l-value expression");
1517
1518 case Expr::ObjCPropertyRefExprClass:
1519 llvm_unreachable("cannot emit a property reference directly");
1520
1521 case Expr::ObjCSelectorExprClass:
1522 return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1523 case Expr::ObjCIsaExprClass:
1524 return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1525 case Expr::BinaryOperatorClass:
1526 return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1527 case Expr::CompoundAssignOperatorClass: {
1528 QualType Ty = E->getType();
1529 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1530 Ty = AT->getValueType();
1531 if (!Ty->isAnyComplexType())
1532 return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1533 return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1534 }
1535 case Expr::CallExprClass:
1536 case Expr::CXXMemberCallExprClass:
1537 case Expr::CXXOperatorCallExprClass:
1538 case Expr::UserDefinedLiteralClass:
1539 return EmitCallExprLValue(cast<CallExpr>(E));
1540 case Expr::CXXRewrittenBinaryOperatorClass:
1541 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1542 IsKnownNonNull);
1543 case Expr::VAArgExprClass:
1544 return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1545 case Expr::DeclRefExprClass:
1546 return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1547 case Expr::ConstantExprClass: {
1548 const ConstantExpr *CE = cast<ConstantExpr>(E);
1549 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1551 return MakeNaturalAlignAddrLValue(Result, RetType);
1552 }
1553 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1554 }
1555 case Expr::ParenExprClass:
1556 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1557 case Expr::GenericSelectionExprClass:
1558 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1559 IsKnownNonNull);
1560 case Expr::PredefinedExprClass:
1561 return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1562 case Expr::StringLiteralClass:
1563 return EmitStringLiteralLValue(cast<StringLiteral>(E));
1564 case Expr::ObjCEncodeExprClass:
1565 return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1566 case Expr::PseudoObjectExprClass:
1567 return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1568 case Expr::InitListExprClass:
1569 return EmitInitListLValue(cast<InitListExpr>(E));
1570 case Expr::CXXTemporaryObjectExprClass:
1571 case Expr::CXXConstructExprClass:
1572 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1573 case Expr::CXXBindTemporaryExprClass:
1574 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1575 case Expr::CXXUuidofExprClass:
1576 return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1577 case Expr::LambdaExprClass:
1578 return EmitAggExprToLValue(E);
1579
1580 case Expr::ExprWithCleanupsClass: {
1581 const auto *cleanups = cast<ExprWithCleanups>(E);
1582 RunCleanupsScope Scope(*this);
1583 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1584 if (LV.isSimple()) {
1585 // Defend against branches out of gnu statement expressions surrounded by
1586 // cleanups.
1587 Address Addr = LV.getAddress(*this);
1588 llvm::Value *V = Addr.getBasePointer();
1589 Scope.ForceCleanup({&V});
1590 Addr.replaceBasePointer(V);
1591 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1592 LV.getBaseInfo(), LV.getTBAAInfo());
1593 }
1594 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1595 // bitfield lvalue or some other non-simple lvalue?
1596 return LV;
1597 }
1598
1599 case Expr::CXXDefaultArgExprClass: {
1600 auto *DAE = cast<CXXDefaultArgExpr>(E);
1601 CXXDefaultArgExprScope Scope(*this, DAE);
1602 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1603 }
1604 case Expr::CXXDefaultInitExprClass: {
1605 auto *DIE = cast<CXXDefaultInitExpr>(E);
1606 CXXDefaultInitExprScope Scope(*this, DIE);
1607 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1608 }
1609 case Expr::CXXTypeidExprClass:
1610 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1611
1612 case Expr::ObjCMessageExprClass:
1613 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1614 case Expr::ObjCIvarRefExprClass:
1615 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1616 case Expr::StmtExprClass:
1617 return EmitStmtExprLValue(cast<StmtExpr>(E));
1618 case Expr::UnaryOperatorClass:
1619 return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1620 case Expr::ArraySubscriptExprClass:
1621 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1622 case Expr::MatrixSubscriptExprClass:
1623 return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1624 case Expr::ArraySectionExprClass:
1625 return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
1626 case Expr::ExtVectorElementExprClass:
1627 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1628 case Expr::CXXThisExprClass:
1630 case Expr::MemberExprClass:
1631 return EmitMemberExpr(cast<MemberExpr>(E));
1632 case Expr::CompoundLiteralExprClass:
1633 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1634 case Expr::ConditionalOperatorClass:
1635 return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1636 case Expr::BinaryConditionalOperatorClass:
1637 return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1638 case Expr::ChooseExprClass:
1639 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1640 case Expr::OpaqueValueExprClass:
1641 return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1642 case Expr::SubstNonTypeTemplateParmExprClass:
1643 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1644 IsKnownNonNull);
1645 case Expr::ImplicitCastExprClass:
1646 case Expr::CStyleCastExprClass:
1647 case Expr::CXXFunctionalCastExprClass:
1648 case Expr::CXXStaticCastExprClass:
1649 case Expr::CXXDynamicCastExprClass:
1650 case Expr::CXXReinterpretCastExprClass:
1651 case Expr::CXXConstCastExprClass:
1652 case Expr::CXXAddrspaceCastExprClass:
1653 case Expr::ObjCBridgedCastExprClass:
1654 return EmitCastLValue(cast<CastExpr>(E));
1655
1656 case Expr::MaterializeTemporaryExprClass:
1657 return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1658
1659 case Expr::CoawaitExprClass:
1660 return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1661 case Expr::CoyieldExprClass:
1662 return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1663 case Expr::PackIndexingExprClass:
1664 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1665 }
1666}
1667
1668/// Given an object of the given canonical type, can we safely copy a
1669/// value out of it based on its initializer?
1671 assert(type.isCanonical());
1672 assert(!type->isReferenceType());
1673
1674 // Must be const-qualified but non-volatile.
1675 Qualifiers qs = type.getLocalQualifiers();
1676 if (!qs.hasConst() || qs.hasVolatile()) return false;
1677
1678 // Otherwise, all object types satisfy this except C++ classes with
1679 // mutable subobjects or non-trivial copy/destroy behavior.
1680 if (const auto *RT = dyn_cast<RecordType>(type))
1681 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1682 if (RD->hasMutableFields() || !RD->isTrivial())
1683 return false;
1684
1685 return true;
1686}
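// Illustrative sketch (hypothetical example, not part of this file): given
//
//   const int N = 42;                  // const, trivial      -> emittable
//   volatile const int V = 42;         // volatile            -> not emittable
//   struct M { mutable int i; };
//   const M m{};                       // mutable subobject   -> not emittable
//
// only 'N' satisfies this predicate, so only a use of 'N' may be folded to its
// initializer value by the callers below.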
1687
1688/// Can we constant-emit a load of a reference to a variable of the
1689/// given type? This is different from predicates like
1690/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1691/// in situations that don't necessarily satisfy the language's rules
1692 /// for this (e.g. C++'s ODR-use rules). For example, we want to be able
1693/// to do this with const float variables even if those variables
1694/// aren't marked 'constexpr'.
1702 type = type.getCanonicalType();
1703 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1704 if (isConstantEmittableObjectType(ref->getPointeeType()))
1706 return CEK_AsReferenceOnly;
1707 }
1709 return CEK_AsValueOnly;
1710 return CEK_None;
1711}
1712
1713/// Try to emit a reference to the given value without producing it as
1714/// an l-value. This is just an optimization, but it avoids us needing
1715/// to emit global copies of variables if they're named without triggering
1716/// a formal use in a context where we can't emit a direct reference to them,
1717/// for instance if a block or lambda or a member of a local class uses a
1718/// const int variable or constexpr variable from an enclosing function.
1719CodeGenFunction::ConstantEmission
1721 ValueDecl *value = refExpr->getDecl();
1722
1723 // The value needs to be an enum constant or a constant variable.
1725 if (isa<ParmVarDecl>(value)) {
1726 CEK = CEK_None;
1727 } else if (auto *var = dyn_cast<VarDecl>(value)) {
1728 CEK = checkVarTypeForConstantEmission(var->getType());
1729 } else if (isa<EnumConstantDecl>(value)) {
1730 CEK = CEK_AsValueOnly;
1731 } else {
1732 CEK = CEK_None;
1733 }
1734 if (CEK == CEK_None) return ConstantEmission();
1735
1736 Expr::EvalResult result;
1737 bool resultIsReference;
1738 QualType resultType;
1739
1740 // It's best to evaluate all the way as an r-value if that's permitted.
1741 if (CEK != CEK_AsReferenceOnly &&
1742 refExpr->EvaluateAsRValue(result, getContext())) {
1743 resultIsReference = false;
1744 resultType = refExpr->getType();
1745
1746 // Otherwise, try to evaluate as an l-value.
1747 } else if (CEK != CEK_AsValueOnly &&
1748 refExpr->EvaluateAsLValue(result, getContext())) {
1749 resultIsReference = true;
1750 resultType = value->getType();
1751
1752 // Failure.
1753 } else {
1754 return ConstantEmission();
1755 }
1756
1757 // In any case, if the initializer has side-effects, abandon ship.
1758 if (result.HasSideEffects)
1759 return ConstantEmission();
1760
1761 // In CUDA/HIP device compilation, a lambda may capture a reference variable
1762 // referencing a global host variable by copy. In this case the lambda should
1763 // make a copy of the value of the global host variable. The DRE of the
1764 // captured reference variable cannot be emitted as a load from the host
1765 // global variable folded to a compile-time constant, since the host variable
1766 // is not accessible on the device. The DRE of the captured reference variable
1767 // has to be loaded from the captures.
1768 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1770 auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1771 if (MD && MD->getParent()->isLambda() &&
1772 MD->getOverloadedOperator() == OO_Call) {
1773 const APValue::LValueBase &base = result.Val.getLValueBase();
1774 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1775 if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1776 if (!VD->hasAttr<CUDADeviceAttr>()) {
1777 return ConstantEmission();
1778 }
1779 }
1780 }
1781 }
1782 }
1783
1784 // Emit as a constant.
1785 auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1786 result.Val, resultType);
1787
1788 // Make sure we emit a debug reference to the global variable.
1789 // This should probably fire even for
1790 if (isa<VarDecl>(value)) {
1791 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1792 EmitDeclRefExprDbgValue(refExpr, result.Val);
1793 } else {
1794 assert(isa<EnumConstantDecl>(value));
1795 EmitDeclRefExprDbgValue(refExpr, result.Val);
1796 }
1797
1798 // If we emitted a reference constant, we need to dereference that.
1799 if (resultIsReference)
1801
1803}
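// Illustrative sketch (hypothetical example, not part of this file):
//
//   void f() {
//     const int limit = 32;
//     auto g = [] { return limit; };   // not an odr-use, nothing is captured
//   }
//
// The DeclRefExpr for 'limit' inside the lambda is emitted here directly as the
// constant 32 rather than as a load from a capture or from a global copy.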
1804
1806 const MemberExpr *ME) {
1807 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1808 // Try to emit static variable member expressions as DREs.
1809 return DeclRefExpr::Create(
1811 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1812 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1813 }
1814 return nullptr;
1815}
1816
1817CodeGenFunction::ConstantEmission
1820 return tryEmitAsConstant(DRE);
1821 return ConstantEmission();
1822}
1823
1825 const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1826 assert(Constant && "not a constant");
1827 if (Constant.isReference())
1828 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1829 E->getExprLoc())
1830 .getScalarVal();
1831 return Constant.getValue();
1832}
1833
1834llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1835 SourceLocation Loc) {
1836 return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
1837 lvalue.getType(), Loc, lvalue.getBaseInfo(),
1838 lvalue.getTBAAInfo(), lvalue.isNontemporal());
1839}
1840
1842 if (Ty->isBooleanType())
1843 return true;
1844
1845 if (const EnumType *ET = Ty->getAs<EnumType>())
1846 return ET->getDecl()->getIntegerType()->isBooleanType();
1847
1848 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1849 return hasBooleanRepresentation(AT->getValueType());
1850
1851 return false;
1852}
1853
1855 llvm::APInt &Min, llvm::APInt &End,
1856 bool StrictEnums, bool IsBool) {
1857 const EnumType *ET = Ty->getAs<EnumType>();
1858 bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1859 ET && !ET->getDecl()->isFixed();
1860 if (!IsBool && !IsRegularCPlusPlusEnum)
1861 return false;
1862
1863 if (IsBool) {
1864 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1865 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1866 } else {
1867 const EnumDecl *ED = ET->getDecl();
1868 ED->getValueRange(End, Min);
1869 }
1870 return true;
1871}
1872
1873llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1874 llvm::APInt Min, End;
1875 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1877 return nullptr;
1878
1879 llvm::MDBuilder MDHelper(getLLVMContext());
1880 return MDHelper.createRange(Min, End);
1881}
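// Illustrative sketch (hypothetical example, not part of this file): a load of
// a 'bool' gets '!range' metadata for the half-open interval [0, 2). For a C++
// enumeration without a fixed underlying type, e.g.
//
//   enum Color { Red, Green, Blue };
//
// the metadata is attached only under -fstrict-enums and covers the value range
// implied by the enumerators (here, the values representable in two bits).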
1882
1884 SourceLocation Loc) {
1885 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1886 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1887 if (!HasBoolCheck && !HasEnumCheck)
1888 return false;
1889
1890 bool IsBool = hasBooleanRepresentation(Ty) ||
1892 bool NeedsBoolCheck = HasBoolCheck && IsBool;
1893 bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1894 if (!NeedsBoolCheck && !NeedsEnumCheck)
1895 return false;
1896
1897 // Single-bit booleans don't need to be checked. Special-case this to avoid
1898 // a bit width mismatch when handling bitfield values. This is handled by
1899 // EmitFromMemory for the non-bitfield case.
1900 if (IsBool &&
1901 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1902 return false;
1903
1904 llvm::APInt Min, End;
1905 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1906 return true;
1907
1908 auto &Ctx = getLLVMContext();
1909 SanitizerScope SanScope(this);
1910 llvm::Value *Check;
1911 --End;
1912 if (!Min) {
1913 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1914 } else {
1915 llvm::Value *Upper =
1916 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1917 llvm::Value *Lower =
1918 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1919 Check = Builder.CreateAnd(Upper, Lower);
1920 }
1921 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1924 NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
1925 EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1926 StaticArgs, EmitCheckValue(Value));
1927 return true;
1928}
1929
1930llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1931 QualType Ty,
1932 SourceLocation Loc,
1933 LValueBaseInfo BaseInfo,
1934 TBAAAccessInfo TBAAInfo,
1935 bool isNontemporal) {
1936 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
1937 if (GV->isThreadLocal())
1938 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
1940
1941 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1942 // Boolean vectors use `iN` as storage type.
1943 if (ClangVecTy->isExtVectorBoolType()) {
1944 llvm::Type *ValTy = ConvertType(Ty);
1945 unsigned ValNumElems =
1946 cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1947 // Load the `iP` storage object (P is the padded vector size).
1948 auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
1949 const auto *RawIntTy = RawIntV->getType();
1950 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
1951 // Bitcast iP --> <P x i1>.
1952 auto *PaddedVecTy = llvm::FixedVectorType::get(
1953 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1954 llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
1955 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
1956 V = emitBoolVecConversion(V, ValNumElems, "extractvec");
1957
1958 return EmitFromMemory(V, Ty);
1959 }
1960
1961 // Handle vectors of size 3 like size 4 for better performance.
1962 const llvm::Type *EltTy = Addr.getElementType();
1963 const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
1964
1965 if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {
1966
1967 llvm::VectorType *vec4Ty =
1968 llvm::FixedVectorType::get(VTy->getElementType(), 4);
1969 Address Cast = Addr.withElementType(vec4Ty);
1970 // Now load value.
1971 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
1972
1973 // Shuffle vector to get vec3.
1974 V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
1975 return EmitFromMemory(V, Ty);
1976 }
1977 }
1978
1979 // Atomic operations have to be done on integral types.
1980 LValue AtomicLValue =
1981 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1982 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
1983 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
1984 }
1985
1986 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
1987 if (isNontemporal) {
1988 llvm::MDNode *Node = llvm::MDNode::get(
1989 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1990 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
1991 }
1992
1993 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
1994
1995 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
1996 // In order to prevent the optimizer from throwing away the check, don't
1997 // attach range metadata to the load.
1998 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
1999 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2000 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2001 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2002 llvm::MDNode::get(getLLVMContext(), std::nullopt));
2003 }
2004
2005 return EmitFromMemory(Load, Ty);
2006}
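// Illustrative sketch (hypothetical example, not part of this file):
//
//   typedef float float3 __attribute__((ext_vector_type(3)));
//   float3 load3(const float3 *p) { return *p; }
//
// Unless the PreserveVec3Type codegen option is set, the load above is emitted
// as a load of <4 x float> followed by a shufflevector down to <3 x float>,
// matching the vec4 storage size of vec3 values.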
2007
2008llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2009 // Bool has a different representation in memory than in registers.
2010 if (hasBooleanRepresentation(Ty)) {
2011 // This should really always be an i1, but sometimes it's already
2012 // an i8, and it's awkward to track those cases down.
2013 if (Value->getType()->isIntegerTy(1))
2014 return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
2015 assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
2016 "wrong value rep of bool");
2017 }
2018
2019 return Value;
2020}
2021
2022llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2023 // Bool has a different representation in memory than in registers.
2024 if (hasBooleanRepresentation(Ty)) {
2025 assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
2026 "wrong value rep of bool");
2027 return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
2028 }
2029 if (Ty->isExtVectorBoolType()) {
2030 const auto *RawIntTy = Value->getType();
2031 // Bitcast iP --> <P x i1>.
2032 auto *PaddedVecTy = llvm::FixedVectorType::get(
2033 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2034 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2035 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2036 llvm::Type *ValTy = ConvertType(Ty);
2037 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2038 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2039 }
2040
2041 return Value;
2042}
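// Illustrative sketch (hypothetical example, not part of this file): a C or C++
// 'bool' is stored as i8 in memory but used as i1 in registers, so
//
//   bool flip(bool b) { return !b; }
//
// loads an i8 and truncates it to i1 here, while EmitToMemory zero-extends the
// i1 back to i8 before the value is stored.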
2043
2044// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2045 // MatrixType), if it points to an array (the memory type of MatrixType).
2047 CodeGenFunction &CGF,
2048 bool IsVector = true) {
2049 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2050 if (ArrayTy && IsVector) {
2051 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2052 ArrayTy->getNumElements());
2053
2054 return Addr.withElementType(VectorTy);
2055 }
2056 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2057 if (VectorTy && !IsVector) {
2058 auto *ArrayTy = llvm::ArrayType::get(
2059 VectorTy->getElementType(),
2060 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2061
2062 return Addr.withElementType(ArrayTy);
2063 }
2064
2065 return Addr;
2066}
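// Illustrative sketch (hypothetical example, not part of this file):
//
//   typedef float m4x4 __attribute__((matrix_type(4, 4)));
//
// An 'm4x4' object is laid out in memory as [16 x float], but its value type is
// <16 x float>; this helper retypes the address so the loads and stores below
// can operate directly on the vector form.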
2067
2068// Emit a store of a matrix LValue. This may require casting the original
2069// pointer to memory address (ArrayType) to a pointer to the value type
2070// (VectorType).
2071static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2072 bool isInit, CodeGenFunction &CGF) {
2073 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
2074 value->getType()->isVectorTy());
2075 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2076 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2077 lvalue.isNontemporal());
2078}
2079
2080void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2081 bool Volatile, QualType Ty,
2082 LValueBaseInfo BaseInfo,
2083 TBAAAccessInfo TBAAInfo,
2084 bool isInit, bool isNontemporal) {
2085 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2086 if (GV->isThreadLocal())
2087 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2089
2090 llvm::Type *SrcTy = Value->getType();
2091 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2092 auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
2093 if (VecTy && ClangVecTy->isExtVectorBoolType()) {
2094 auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType());
2095 // Expand to the memory bit width.
2096 unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits();
2097 // <N x i1> --> <P x i1>.
2098 Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2099 // <P x i1> --> iP.
2100 Value = Builder.CreateBitCast(Value, MemIntTy);
2101 } else if (!CGM.getCodeGenOpts().PreserveVec3Type) {
2102 // Handle vec3 special.
2103 if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
2104 // Our source is a vec3, do a shuffle vector to make it a vec4.
2105 Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
2106 "extractVec");
2107 SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
2108 }
2109 if (Addr.getElementType() != SrcTy) {
2110 Addr = Addr.withElementType(SrcTy);
2111 }
2112 }
2113 }
2114
2115 Value = EmitToMemory(Value, Ty);
2116
2117 LValue AtomicLValue =
2118 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2119 if (Ty->isAtomicType() ||
2120 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2121 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2122 return;
2123 }
2124
2125 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2126 if (isNontemporal) {
2127 llvm::MDNode *Node =
2128 llvm::MDNode::get(Store->getContext(),
2129 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2130 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2131 }
2132
2133 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2134}
2135
2136void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2137 bool isInit) {
2138 if (lvalue.getType()->isConstantMatrixType()) {
2139 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2140 return;
2141 }
2142
2143 EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
2144 lvalue.getType(), lvalue.getBaseInfo(),
2145 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2146}
2147
2148// Emit a load of a LValue of matrix type. This may require casting the pointer
2149// to memory address (ArrayType) to a pointer to the value type (VectorType).
2151 CodeGenFunction &CGF) {
2152 assert(LV.getType()->isConstantMatrixType());
2153 Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
2154 LV.setAddress(Addr);
2155 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2156}
2157
2158/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2159/// method emits the address of the lvalue, then loads the result as an rvalue,
2160/// returning the rvalue.
2162 if (LV.isObjCWeak()) {
2163 // load of a __weak object.
2164 Address AddrWeakObj = LV.getAddress(*this);
2166 AddrWeakObj));
2167 }
2169 // In MRC mode, we do a load+autorelease.
2170 if (!getLangOpts().ObjCAutoRefCount) {
2171 return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
2172 }
2173
2174 // In ARC mode, we load retained and then consume the value.
2175 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
2176 Object = EmitObjCConsumeObject(LV.getType(), Object);
2177 return RValue::get(Object);
2178 }
2179
2180 if (LV.isSimple()) {
2181 assert(!LV.getType()->isFunctionType());
2182
2183 if (LV.getType()->isConstantMatrixType())
2184 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2185
2186 // Everything needs a load.
2187 return RValue::get(EmitLoadOfScalar(LV, Loc));
2188 }
2189
2190 if (LV.isVectorElt()) {
2191 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2192 LV.isVolatileQualified());
2193 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2194 "vecext"));
2195 }
2196
2197 // If this is a reference to a subset of the elements of a vector, either
2198 // shuffle the input or extract/insert them as appropriate.
2199 if (LV.isExtVectorElt()) {
2201 }
2202
2203 // Global register variables always invoke intrinsics
2204 if (LV.isGlobalReg())
2205 return EmitLoadOfGlobalRegLValue(LV);
2206
2207 if (LV.isMatrixElt()) {
2208 llvm::Value *Idx = LV.getMatrixIdx();
2209 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2210 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2211 llvm::MatrixBuilder MB(Builder);
2212 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2213 }
2214 llvm::LoadInst *Load =
2216 return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2217 }
2218
2219 assert(LV.isBitField() && "Unknown LValue type!");
2220 return EmitLoadOfBitfieldLValue(LV, Loc);
2221}
2222
2224 SourceLocation Loc) {
2225 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2226
2227 // Get the output type.
2228 llvm::Type *ResLTy = ConvertType(LV.getType());
2229
2230 Address Ptr = LV.getBitFieldAddress();
2231 llvm::Value *Val =
2232 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2233
2234 bool UseVolatile = LV.isVolatileQualified() &&
2235 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2236 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2237 const unsigned StorageSize =
2238 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2239 if (Info.IsSigned) {
2240 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2241 unsigned HighBits = StorageSize - Offset - Info.Size;
2242 if (HighBits)
2243 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2244 if (Offset + HighBits)
2245 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2246 } else {
2247 if (Offset)
2248 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2249 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2250 Val = Builder.CreateAnd(
2251 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2252 }
2253 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2254 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2255 return RValue::get(Val);
2256}
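// Illustrative sketch (hypothetical example, not part of this file):
//
//   struct S { unsigned a : 3; int b : 5; unsigned c : 8; };
//   int get_b(struct S s) { return s.b; }
//
// The whole storage unit is loaded ("bf.load"); the signed field 'b' is then
// extracted with a shift left by (StorageSize - Offset - Size) and an
// arithmetic shift right by (StorageSize - Size), which drops the low bits and
// sign-extends in one step. An unsigned field instead uses 'lshr' by Offset
// followed by an 'and' with the low Size bits.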
2257
2258// If this is a reference to a subset of the elements of a vector, create an
2259// appropriate shufflevector.
2261 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2262 LV.isVolatileQualified());
2263
2264 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2265 // IR value to a vector here allows the rest of codegen to behave as normal.
2266 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2267 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2268 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2269 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2270 }
2271
2272 const llvm::Constant *Elts = LV.getExtVectorElts();
2273
2274 // If the result of the expression is a non-vector type, we must be extracting
2275 // a single element. Just codegen as an extractelement.
2276 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2277 if (!ExprVT) {
2278 unsigned InIdx = getAccessedFieldNo(0, Elts);
2279 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2280 return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2281 }
2282
2283 // Always use shuffle vector to try to retain the original program structure
2284 unsigned NumResultElts = ExprVT->getNumElements();
2285
2287 for (unsigned i = 0; i != NumResultElts; ++i)
2288 Mask.push_back(getAccessedFieldNo(i, Elts));
2289
2290 Vec = Builder.CreateShuffleVector(Vec, Mask);
2291 return RValue::get(Vec);
2292}
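// Illustrative sketch (hypothetical example, not part of this file):
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   typedef float float2 __attribute__((ext_vector_type(2)));
//   float2 swap_xy(float4 v) { return v.yx; }
//
// Reading 'v.yx' loads the whole <4 x float> and emits a shufflevector with
// mask <1, 0>, while a single component such as 'v.z' becomes an
// extractelement.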
2293
2294 /// Generates an lvalue for a partial ext_vector access.
2296 Address VectorAddress = LV.getExtVectorAddress();
2297 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2298 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2299
2300 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2301
2302 const llvm::Constant *Elts = LV.getExtVectorElts();
2303 unsigned ix = getAccessedFieldNo(0, Elts);
2304
2305 Address VectorBasePtrPlusIx =
2306 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2307 "vector.elt");
2308
2309 return VectorBasePtrPlusIx;
2310}
2311
2312 /// Loads of global named registers are always calls to intrinsics.
2314 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2315 "Bad type for register variable");
2316 llvm::MDNode *RegName = cast<llvm::MDNode>(
2317 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2318
2319 // We accept integer and pointer types only
2320 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2321 llvm::Type *Ty = OrigTy;
2322 if (OrigTy->isPointerTy())
2323 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2324 llvm::Type *Types[] = { Ty };
2325
2326 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2327 llvm::Value *Call = Builder.CreateCall(
2328 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2329 if (OrigTy->isPointerTy())
2330 Call = Builder.CreateIntToPtr(Call, OrigTy);
2331 return RValue::get(Call);
2332}
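// Illustrative sketch (hypothetical example, not part of this file; assumes a
// target where "sp" is a supported named register):
//
//   register unsigned long current_sp __asm__("sp");
//   unsigned long get_sp(void) { return current_sp; }
//
// The read becomes a call to the llvm.read_register intrinsic with the metadata
// string !"sp"; a pointer-typed register variable is read as an integer and
// converted back with inttoptr.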
2333
2334/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2335 /// lvalue, where both are guaranteed to have the same type, and that type
2336/// is 'Ty'.
2338 bool isInit) {
2339 if (!Dst.isSimple()) {
2340 if (Dst.isVectorElt()) {
2341 // Read/modify/write the vector, inserting the new element.
2342 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2343 Dst.isVolatileQualified());
2344 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2345 if (IRStoreTy) {
2346 auto *IRVecTy = llvm::FixedVectorType::get(
2347 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2348 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2349 // iN --> <N x i1>.
2350 }
2351 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2352 Dst.getVectorIdx(), "vecins");
2353 if (IRStoreTy) {
2354 // <N x i1> --> <iN>.
2355 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2356 }
2358 Dst.isVolatileQualified());
2359 return;
2360 }
2361
2362 // If this is an update of extended vector elements, insert them as
2363 // appropriate.
2364 if (Dst.isExtVectorElt())
2366
2367 if (Dst.isGlobalReg())
2368 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2369
2370 if (Dst.isMatrixElt()) {
2371 llvm::Value *Idx = Dst.getMatrixIdx();
2372 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2373 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2374 llvm::MatrixBuilder MB(Builder);
2375 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2376 }
2377 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2378 llvm::Value *Vec =
2379 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2381 Dst.isVolatileQualified());
2382 return;
2383 }
2384
2385 assert(Dst.isBitField() && "Unknown LValue type");
2386 return EmitStoreThroughBitfieldLValue(Src, Dst);
2387 }
2388
2389 // There's special magic for assigning into an ARC-qualified l-value.
2390 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2391 switch (Lifetime) {
2393 llvm_unreachable("present but none");
2394
2396 // nothing special
2397 break;
2398
2400 if (isInit) {
2401 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2402 break;
2403 }
2404 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2405 return;
2406
2408 if (isInit)
2409 // Initialize and then skip the primitive store.
2410 EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
2411 else
2412 EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
2413 /*ignore*/ true);
2414 return;
2415
2418 Src.getScalarVal()));
2419 // fall into the normal path
2420 break;
2421 }
2422 }
2423
2424 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2425 // load of a __weak object.
2426 Address LvalueDst = Dst.getAddress(*this);
2427 llvm::Value *src = Src.getScalarVal();
2428 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2429 return;
2430 }
2431
2432 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2433 // load of a __strong object.
2434 Address LvalueDst = Dst.getAddress(*this);
2435 llvm::Value *src = Src.getScalarVal();
2436 if (Dst.isObjCIvar()) {
2437 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2438 llvm::Type *ResultType = IntPtrTy;
2440 llvm::Value *RHS = dst.emitRawPointer(*this);
2441 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2442 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2443 ResultType, "sub.ptr.lhs.cast");
2444 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2445 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2446 } else if (Dst.isGlobalObjCRef()) {
2447 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2448 Dst.isThreadLocalRef());
2449 }
2450 else
2451 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2452 return;
2453 }
2454
2455 assert(Src.isScalar() && "Can't emit an agg store with this method");
2456 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2457}
2458
2460 llvm::Value **Result) {
2461 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2462 llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
2463 Address Ptr = Dst.getBitFieldAddress();
2464
2465 // Get the source value, truncated to the width of the bit-field.
2466 llvm::Value *SrcVal = Src.getScalarVal();
2467
2468 // Cast the source to the storage type and shift it into place.
2469 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2470 /*isSigned=*/false);
2471 llvm::Value *MaskedVal = SrcVal;
2472
2473 const bool UseVolatile =
2474 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2475 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2476 const unsigned StorageSize =
2477 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2478 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2479 // See if there are other bits in the bitfield's storage we'll need to load
2480 // and mask together with the source before storing.
2481 if (StorageSize != Info.Size) {
2482 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2483 llvm::Value *Val =
2484 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2485
2486 // Mask the source value as needed.
2488 SrcVal = Builder.CreateAnd(
2489 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2490 "bf.value");
2491 MaskedVal = SrcVal;
2492 if (Offset)
2493 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2494
2495 // Mask out the original value.
2496 Val = Builder.CreateAnd(
2497 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2498 "bf.clear");
2499
2500 // Or together the unchanged values and the source value.
2501 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2502 } else {
2503 assert(Offset == 0);
2504 // According to the AAPCS:
2505 // When a volatile bit-field is written, and its container does not overlap
2506 // with any non-bit-field member, its container must be read exactly once
2507 // and written exactly once using the access width appropriate to the type
2508 // of the container. The two accesses are not atomic.
2509 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2510 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2511 Builder.CreateLoad(Ptr, true, "bf.load");
2512 }
2513
2514 // Write the new value back out.
2515 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2516
2517 // Return the new value of the bit-field, if requested.
2518 if (Result) {
2519 llvm::Value *ResultVal = MaskedVal;
2520
2521 // Sign extend the value if needed.
2522 if (Info.IsSigned) {
2523 assert(Info.Size <= StorageSize);
2524 unsigned HighBits = StorageSize - Info.Size;
2525 if (HighBits) {
2526 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2527 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2528 }
2529 }
2530
2531 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2532 "bf.result.cast");
2533 *Result = EmitFromMemory(ResultVal, Dst.getType());
2534 }
2535}
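// Illustrative sketch (hypothetical example, not part of this file):
//
//   struct S { unsigned a : 3; unsigned b : 5; unsigned c : 8; };
//   void set_b(struct S *s, unsigned v) { s->b = v; }
//
// The store is a read-modify-write of the shared storage unit: the source is
// masked to Size bits ("bf.value") and shifted into place, the old value is
// and-ed with the inverse of the field's bit mask ("bf.clear"), the two are
// or-ed together ("bf.set"), and a single store writes the result back.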
2536
2538 LValue Dst) {
2539 // HLSL allows storing to scalar values through ExtVector component LValues.
2540 // To support this we need to handle the case where the destination address is
2541 // a scalar.
2542 Address DstAddr = Dst.getExtVectorAddress();
2543 if (!DstAddr.getElementType()->isVectorTy()) {
2544 assert(!Dst.getType()->isVectorType() &&
2545 "this should only occur for non-vector l-values");
2546 Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
2547 return;
2548 }
2549
2550 // This access turns into a read/modify/write of the vector. Load the input
2551 // value now.
2552 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2553 const llvm::Constant *Elts = Dst.getExtVectorElts();
2554
2555 llvm::Value *SrcVal = Src.getScalarVal();
2556
2557 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2558 unsigned NumSrcElts = VTy->getNumElements();
2559 unsigned NumDstElts =
2560 cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2561 if (NumDstElts == NumSrcElts) {
2562 // Use a shuffle vector when the source and destination have the same number
2563 // of elements, remapping the mask since it applies on the side the value is
2564 // stored to.
2565 SmallVector<int, 4> Mask(NumDstElts);
2566 for (unsigned i = 0; i != NumSrcElts; ++i)
2567 Mask[getAccessedFieldNo(i, Elts)] = i;
2568
2569 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2570 } else if (NumDstElts > NumSrcElts) {
2571 // Extend the source vector to the destination length and then shuffle it
2572 // into the destination.
2573 // FIXME: since we're shuffling with undef, can we just use the indices
2574 // into that? This could be simpler.
2575 SmallVector<int, 4> ExtMask;
2576 for (unsigned i = 0; i != NumSrcElts; ++i)
2577 ExtMask.push_back(i);
2578 ExtMask.resize(NumDstElts, -1);
2579 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2581 // Build an identity mask over the destination elements.
2582 for (unsigned i = 0; i != NumDstElts; ++i)
2583 Mask.push_back(i);
2584
2585 // When the vector size is odd and .odd or .hi is used, the last element
2586 // of the Elts constant array will be one past the size of the vector.
2587 // Ignore the last element here, if it is greater than the mask size.
2588 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2589 NumSrcElts--;
2590
2591 // Overwrite the accessed positions with elements from the extended source.
2592 for (unsigned i = 0; i != NumSrcElts; ++i)
2593 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2594 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2595 } else {
2596 // We should never shorten the vector
2597 llvm_unreachable("unexpected shorten vector length");
2598 }
2599 } else {
2600 // If the Src is a scalar (not a vector) and the target is a vector, it must
2601 // be updating one element.
2602 unsigned InIdx = getAccessedFieldNo(0, Elts);
2603 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2604 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2605 }
2606
2608 Dst.isVolatileQualified());
2609}
2610
2611 /// Stores of global named registers are always calls to intrinsics.
2613 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2614 "Bad type for register variable");
2615 llvm::MDNode *RegName = cast<llvm::MDNode>(
2616 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2617 assert(RegName && "Register LValue is not metadata");
2618
2619 // We accept integer and pointer types only
2620 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2621 llvm::Type *Ty = OrigTy;
2622 if (OrigTy->isPointerTy())
2623 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2624 llvm::Type *Types[] = { Ty };
2625
2626 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2627 llvm::Value *Value = Src.getScalarVal();
2628 if (OrigTy->isPointerTy())
2629 Value = Builder.CreatePtrToInt(Value, Ty);
2630 Builder.CreateCall(
2631 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2632}
2633
2634// setObjCGCLValueClass - sets class of the lvalue for the purpose of
2635 // generating the write-barrier API. It is currently a global, ivar,
2636// or neither.
2637static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2638 LValue &LV,
2639 bool IsMemberAccess=false) {
2640 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2641 return;
2642
2643 if (isa<ObjCIvarRefExpr>(E)) {
2644 QualType ExpTy = E->getType();
2645 if (IsMemberAccess && ExpTy->isPointerType()) {
2646 // If the ivar is a structure pointer, assigning to a field of
2647 // this struct follows gcc's behavior and conservatively makes it a
2648 // non-ivar write-barrier.
2649 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2650 if (ExpTy->isRecordType()) {
2651 LV.setObjCIvar(false);
2652 return;
2653 }
2654 }
2655 LV.setObjCIvar(true);
2656 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2657 LV.setBaseIvarExp(Exp->getBase());
2658 LV.setObjCArray(E->getType()->isArrayType());
2659 return;
2660 }
2661
2662 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2663 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2664 if (VD->hasGlobalStorage()) {
2665 LV.setGlobalObjCRef(true);
2666 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2667 }
2668 }
2669 LV.setObjCArray(E->getType()->isArrayType());
2670 return;
2671 }
2672
2673 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2674 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2675 return;
2676 }
2677
2678 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2679 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2680 if (LV.isObjCIvar()) {
2681 // If cast is to a structure pointer, follow gcc's behavior and make it
2682 // a non-ivar write-barrier.
2683 QualType ExpTy = E->getType();
2684 if (ExpTy->isPointerType())
2685 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2686 if (ExpTy->isRecordType())
2687 LV.setObjCIvar(false);
2688 }
2689 return;
2690 }
2691
2692 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2693 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2694 return;
2695 }
2696
2697 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2698 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2699 return;
2700 }
2701
2702 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2703 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2704 return;
2705 }
2706
2707 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2708 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2709 return;
2710 }
2711
2712 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2713 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2714 if (LV.isObjCIvar() && !LV.isObjCArray())
2715 // Using array syntax to assign to what an ivar points to is not the
2716 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2717 LV.setObjCIvar(false);
2718 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2719 // Using array syntax to assign to what a global points to is not the
2720 // same as assigning to the global itself. {id *G;} G[i] = 0;
2721 LV.setGlobalObjCRef(false);
2722 return;
2723 }
2724
2725 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2726 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2727 // We don't know if the member is an 'ivar', but this flag is looked at
2728 // only in the context of LV.isObjCIvar().
2729 LV.setObjCArray(E->getType()->isArrayType());
2730 return;
2731 }
2732}
2733
2735 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2736 llvm::Type *RealVarTy, SourceLocation Loc) {
2737 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2739 CGF, VD, Addr, Loc);
2740 else
2741 Addr =
2742 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2743
2744 Addr = Addr.withElementType(RealVarTy);
2745 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2746}
2747
2749 const VarDecl *VD, QualType T) {
2750 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2751 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2752 // Return an invalid address if variable is MT_To (or MT_Enter starting with
2753 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2754 // and MT_To (or MT_Enter) with unified memory, return a valid address.
2755 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2756 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2758 return Address::invalid();
2759 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2760 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2761 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2763 "Expected link clause OR to clause with unified memory enabled.");
2764 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2766 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2767}
2768
2769Address
2771 LValueBaseInfo *PointeeBaseInfo,
2772 TBAAAccessInfo *PointeeTBAAInfo) {
2773 llvm::LoadInst *Load =
2774 Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
2776 return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
2777 CharUnits(), /*ForPointeeType=*/true,
2778 PointeeBaseInfo, PointeeTBAAInfo);
2779}
2780
2782 LValueBaseInfo PointeeBaseInfo;
2783 TBAAAccessInfo PointeeTBAAInfo;
2784 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2785 &PointeeTBAAInfo);
2786 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2787 PointeeBaseInfo, PointeeTBAAInfo);
2788}
2789
2791 const PointerType *PtrTy,
2792 LValueBaseInfo *BaseInfo,
2793 TBAAAccessInfo *TBAAInfo) {
2794 llvm::Value *Addr = Builder.CreateLoad(Ptr);
2795 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
2796 CharUnits(), /*ForPointeeType=*/true,
2797 BaseInfo, TBAAInfo);
2798}
2799
2801 const PointerType *PtrTy) {
2802 LValueBaseInfo BaseInfo;
2803 TBAAAccessInfo TBAAInfo;
2804 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2805 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2806}
2807
2809 const Expr *E, const VarDecl *VD) {
2810 QualType T = E->getType();
2811
2812 // If it's thread_local, emit a call to its wrapper function instead.
2813 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2815 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2816 // Check if the variable is marked as declare target with link clause in
2817 // device codegen.
2818 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2819 Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2820 if (Addr.isValid())
2821 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2822 }
2823
2824 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2825
2826 if (VD->getTLSKind() != VarDecl::TLS_None)
2827 V = CGF.Builder.CreateThreadLocalAddress(V);
2828
2829 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2830 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2831 Address Addr(V, RealVarTy, Alignment);
2832 // Emit reference to the private copy of the variable if it is an OpenMP
2833 // threadprivate variable.
2834 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2835 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2836 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2837 E->getExprLoc());
2838 }
2839 LValue LV = VD->getType()->isReferenceType() ?
2840 CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2843 setObjCGCLValueClass(CGF.getContext(), E, LV);
2844 return LV;
2845}
2846
2847static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
2848 GlobalDecl GD) {
2849 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2850 if (FD->hasAttr<WeakRefAttr>()) {
2851 ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
2852 return aliasee.getPointer();
2853 }
2854
2855 llvm::Constant *V = CGM.GetAddrOfFunction(GD);
2856 return V;
2857}
2858
2860 GlobalDecl GD) {
2861 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2862 llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
2863 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2864 return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2866}
2867
2869 llvm::Value *ThisValue) {
2870
2871 return CGF.EmitLValueForLambdaField(FD, ThisValue);
2872}
2873
2874/// Named Registers are named metadata pointing to the register name
2875/// which will be read from/written to as an argument to the intrinsic
2876/// @llvm.read/write_register.
2877/// So far, only the name is being passed down, but other options such as
2878/// register type, allocation type or even optimization options could be
2879/// passed down via the metadata node.
2881 SmallString<64> Name("llvm.named.register.");
2882 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2883 assert(Asm->getLabel().size() < 64-Name.size() &&
2884 "Register name too big");
2885 Name.append(Asm->getLabel());
2886 llvm::NamedMDNode *M =
2887 CGM.getModule().getOrInsertNamedMetadata(Name);
2888 if (M->getNumOperands() == 0) {
2889 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2890 Asm->getLabel());
2891 llvm::Metadata *Ops[] = {Str};
2892 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2893 }
2894
2895 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2896
2897 llvm::Value *Ptr =
2898 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2899 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2900}
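// Illustrative sketch (hypothetical example, not part of this file): for
//
//   register unsigned long current_sp __asm__("sp");
//
// this builds (or reuses) the named metadata node "llvm.named.register.sp"
// whose single operand is !{!"sp"}, and the resulting lvalue wraps that operand
// as a MetadataAsValue; the actual reads and writes are the intrinsic calls
// emitted by EmitLoadOfGlobalRegLValue and EmitStoreThroughGlobalRegLValue
// above.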
2901
2902/// Determine whether we can emit a reference to \p VD from the current
2903/// context, despite not necessarily having seen an odr-use of the variable in
2904/// this context.
2906 const DeclRefExpr *E,
2907 const VarDecl *VD) {
2908 // For a variable declared in an enclosing scope, do not emit a spurious
2909 // reference even if we have a capture, as that will emit an unwarranted
2910 // reference to our capture state, and will likely generate worse code than
2911 // emitting a local copy.
2913 return false;
2914
2915 // For a local declaration declared in this function, we can always reference
2916 // it even if we don't have an odr-use.
2917 if (VD->hasLocalStorage()) {
2918 return VD->getDeclContext() ==
2919 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2920 }
2921
2922 // For a global declaration, we can emit a reference to it if we know
2923 // for sure that we are able to emit a definition of it.
2924 VD = VD->getDefinition(CGF.getContext());
2925 if (!VD)
2926 return false;
2927
2928 // Don't emit a spurious reference if it might be to a variable that only
2929 // exists on a different device / target.
2930 // FIXME: This is unnecessarily broad. Check whether this would actually be a
2931 // cross-target reference.
2932 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
2933 CGF.getLangOpts().OpenCL) {
2934 return false;
2935 }
2936
2937 // We can emit a spurious reference only if the linkage implies that we'll
2938 // be emitting a non-interposable symbol that will be retained until link
2939 // time.
2940 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
2941 case llvm::GlobalValue::ExternalLinkage:
2942 case llvm::GlobalValue::LinkOnceODRLinkage:
2943 case llvm::GlobalValue::WeakODRLinkage:
2944 case llvm::GlobalValue::InternalLinkage:
2945 case llvm::GlobalValue::PrivateLinkage:
2946 return true;
2947 default:
2948 return false;
2949 }
2950}
2951
2953 const NamedDecl *ND = E->getDecl();
2954 QualType T = E->getType();
2955
2956 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
2957 "should not emit an unevaluated operand");
2958
2959 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2960 // Global named registers are accessed via intrinsics only
2961 if (VD->getStorageClass() == SC_Register &&
2962 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
2963 return EmitGlobalNamedRegister(VD, CGM);
2964
2965 // If this DeclRefExpr does not constitute an odr-use of the variable,
2966 // we're not permitted to emit a reference to it in general, and it might
2967 // not be captured if capture would be necessary for a use. Emit the
2968 // constant value directly instead.
2969 if (E->isNonOdrUse() == NOUR_Constant &&
2970 (VD->getType()->isReferenceType() ||
2971 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
2972 VD->getAnyInitializer(VD);
2973 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
2974 E->getLocation(), *VD->evaluateValue(), VD->getType());
2975 assert(Val && "failed to emit constant expression");
2976
2977 Address Addr = Address::invalid();
2978 if (!VD->getType()->isReferenceType()) {
2979 // Spill the constant value to a global.
2980 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
2981 getContext().getDeclAlign(VD));
2982 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
2983 auto *PTy = llvm::PointerType::get(
2984 VarTy, getTypes().getTargetAddressSpace(VD->getType()));
2985 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
2986 } else {
2987 // Should we be using the alignment of the constant pointer we emitted?
2988 CharUnits Alignment =
2990 /* BaseInfo= */ nullptr,
2991 /* TBAAInfo= */ nullptr,
2992 /* forPointeeType= */ true);
2993 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
2994 }
2995 return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2996 }
2997
2998 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
2999
3000 // Check for captured variables.
3002 VD = VD->getCanonicalDecl();
3003 if (auto *FD = LambdaCaptureFields.lookup(VD))
3004 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3005 if (CapturedStmtInfo) {
3006 auto I = LocalDeclMap.find(VD);
3007 if (I != LocalDeclMap.end()) {
3008 LValue CapLVal;
3009 if (VD->getType()->isReferenceType())
3010 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3012 else
3013 CapLVal = MakeAddrLValue(I->second, T);
3014 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3015 // in simd context.
3016 if (getLangOpts().OpenMP &&
3018 CapLVal.setNontemporal(/*Value=*/true);
3019 return CapLVal;
3020 }
3021 LValue CapLVal =
3024 Address LValueAddress = CapLVal.getAddress(*this);
3025 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3026 LValueAddress.getElementType(),
3027 getContext().getDeclAlign(VD)),
3028 CapLVal.getType(),
3030 CapLVal.getTBAAInfo());
3031 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3032 // in simd context.
3033 if (getLangOpts().OpenMP &&
3035 CapLVal.setNontemporal(/*Value=*/true);
3036 return CapLVal;
3037 }
3038
3039 assert(isa<BlockDecl>(CurCodeDecl));
3040 Address addr = GetAddrOfBlockDecl(VD);
3041 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3042 }
3043 }
3044
3045 // FIXME: We should be able to assert this for FunctionDecls as well!
3046 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3047 // those with a valid source location.
3048 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3049 !E->getLocation().isValid()) &&
3050 "Should not use decl without marking it used!");
3051
3052 if (ND->hasAttr<WeakRefAttr>()) {
3053 const auto *VD = cast<ValueDecl>(ND);
3055 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3056 }
3057
3058 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3059 // Check if this is a global variable.
3060 if (VD->hasLinkage() || VD->isStaticDataMember())
3061 return EmitGlobalVarDeclLValue(*this, E, VD);
3062
3063 Address addr = Address::invalid();
3064
3065 // The variable should generally be present in the local decl map.
3066 auto iter = LocalDeclMap.find(VD);
3067 if (iter != LocalDeclMap.end()) {
3068 addr = iter->second;
3069
3070 // Otherwise, it might be a static local we haven't emitted yet for
3071 // some reason; most likely, because it's in an outer function.
3072 } else if (VD->isStaticLocal()) {
3073 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3075 addr = Address(
3076 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3077
3078 // No other cases for now.
3079 } else {
3080 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3081 }
3082
3083 // Handle threadlocal function locals.
3084 if (VD->getTLSKind() != VarDecl::TLS_None)
3085 addr = addr.withPointer(
3086 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3088
3089 // Check for OpenMP threadprivate variables.
3090 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3091 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3093 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3094 E->getExprLoc());
3095 }
3096
3097 // Drill into block byref variables.
3098 bool isBlockByref = VD->isEscapingByref();
3099 if (isBlockByref) {
3100 addr = emitBlockByrefAddress(addr, VD);
3101 }
3102
3103 // Drill into reference types.
3104 LValue LV = VD->getType()->isReferenceType() ?
3105 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3107
3108 bool isLocalStorage = VD->hasLocalStorage();
3109
3110 bool NonGCable = isLocalStorage &&
3111 !VD->getType()->isReferenceType() &&
3112 !isBlockByref;
3113 if (NonGCable) {
3115 LV.setNonGC(true);
3116 }
3117
3118 bool isImpreciseLifetime =
3119 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3120 if (isImpreciseLifetime)
3123 return LV;
3124 }
3125
3126 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
3127 LValue LV = EmitFunctionDeclLValue(*this, E, FD);
3128
3129 // Emit debuginfo for the function declaration if the target wants to.
3130 if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) {
3131 if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) {
3132 auto *Fn =
3133 cast<llvm::Function>(LV.getPointer(*this)->stripPointerCasts());
3134 if (!Fn->getSubprogram())
3135 DI->EmitFunctionDecl(FD, FD->getLocation(), T, Fn);
3136 }
3137 }
3138
3139 return LV;
3140 }
3141
3142 // FIXME: While we're emitting a binding from an enclosing scope, all other
3143 // DeclRefExprs we see should be implicitly treated as if they also refer to
3144 // an enclosing scope.
3145 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3147 auto *FD = LambdaCaptureFields.lookup(BD);
3148 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3149 }
3150 return EmitLValue(BD->getBinding());
3151 }
3152
3153 // We can form DeclRefExprs naming GUID declarations when reconstituting
3154 // non-type template parameters into expressions.
3155 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3158
3159 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3160 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3161 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3162
3163 if (AS != T.getAddressSpace()) {
3164 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3165 auto PtrTy = ATPO.getElementType()->getPointerTo(TargetAS);
3167 CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
3168 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3169 }
3170
3171 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3172 }
3173
3174 llvm_unreachable("Unhandled DeclRefExpr");
3175}
3176
3178 // __extension__ doesn't affect lvalue-ness.
3179 if (E->getOpcode() == UO_Extension)
3180 return EmitLValue(E->getSubExpr());
3181
3183 switch (E->getOpcode()) {
3184 default: llvm_unreachable("Unknown unary operator lvalue!");
3185 case UO_Deref: {
3187 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3188
3189 LValueBaseInfo BaseInfo;
3190 TBAAAccessInfo TBAAInfo;
3191 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3192 &TBAAInfo);
3193 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3195
3196 // We should not generate __weak write barrier on indirect reference
3197 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3198 // But, we continue to generate __strong write barrier on indirect write
3199 // into a pointer to object.
3200 if (getLangOpts().ObjC &&
3201 getLangOpts().getGC() != LangOptions::NonGC &&
3202 LV.isObjCWeak())
3204 return LV;
3205 }
3206 case UO_Real:
3207 case UO_Imag: {
3208 LValue LV = EmitLValue(E->getSubExpr());
3209 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3210
3211 // __real is valid on scalars. This is a faster way of testing that.
3212 // __imag can only produce an rvalue on scalars.
3213 if (E->getOpcode() == UO_Real &&
3214 !LV.getAddress(*this).getElementType()->isStructTy()) {
3215 assert(E->getSubExpr()->getType()->isArithmeticType());
3216 return LV;
3217 }
3218
3219 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3220
3221 Address Component =
3222 (E->getOpcode() == UO_Real
3223 ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
3224 : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
3225 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3226 CGM.getTBAAInfoForSubobject(LV, T));
3227 ElemLV.getQuals().addQualifiers(LV.getQuals());
3228 return ElemLV;
3229 }
3230 case UO_PreInc:
3231 case UO_PreDec: {
3232 LValue LV = EmitLValue(E->getSubExpr());
3233 bool isInc = E->getOpcode() == UO_PreInc;
3234
3235 if (E->getType()->isAnyComplexType())
3236 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3237 else
3238 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3239 return LV;
3240 }
3241 }
3242}
3243
3244LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
3245 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
3246 E->getType(), AlignmentSource::Decl);
3247}
3248
3249LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
3250 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
3251 E->getType(), AlignmentSource::Decl);
3252}
3253
3254LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
3255 auto SL = E->getFunctionName();
3256 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3257 StringRef FnName = CurFn->getName();
3258 if (FnName.starts_with("\01"))
3259 FnName = FnName.substr(1);
3260 StringRef NameItems[] = {
3261 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3262 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3263 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3264 std::string Name = std::string(SL->getString());
3265 if (!Name.empty()) {
3266 unsigned Discriminator =
3267 CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3268 if (Discriminator)
3269 Name += "_" + Twine(Discriminator + 1).str();
3270 auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3271 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3272 } else {
3273 auto C =
3274 CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3275 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3276 }
3277 }
3278 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3279 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3280}
3281
3282/// Emit a type description suitable for use by a runtime sanitizer library. The
3283/// format of a type descriptor is
3284///
3285/// \code
3286/// { i16 TypeKind, i16 TypeInfo }
3287/// \endcode
3288///
3289/// followed by an array of i8 containing the type name. TypeKind is 0 for an
3290/// integer, 1 for a floating point value, and -1 for anything else.
3291llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
3292 // Only emit each type's descriptor once.
3293 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3294 return C;
3295
3296 uint16_t TypeKind = -1;
3297 uint16_t TypeInfo = 0;
3298
3299 if (T->isIntegerType()) {
3300 TypeKind = 0;
3301 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3302 (T->isSignedIntegerType() ? 1 : 0);
3303 } else if (T->isFloatingType()) {
3304 TypeKind = 1;
3305 TypeInfo = getContext().getTypeSize(T);
3306 }
3307
3308 // Format the type name as if for a diagnostic, including quotes and
3309 // optionally an 'aka'.
3310 SmallString<32> Buffer;
3311 CGM.getDiags().ConvertArgToString(
3312 DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(),
3313 StringRef(), std::nullopt, Buffer, std::nullopt);
3314
3315 llvm::Constant *Components[] = {
3316 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3317 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3318 };
3319 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3320
3321 auto *GV = new llvm::GlobalVariable(
3322 CGM.getModule(), Descriptor->getType(),
3323 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3324 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3325 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3326
3327 // Remember the descriptor for this type.
3328 CGM.setTypeDescriptorInMap(T, GV);
3329
3330 return GV;
3331}
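// Illustrative note (a sketch, not upstream CGExpr.cpp code): for a 32-bit
// signed 'int', the descriptor built above carries TypeKind = 0 and
// TypeInfo = (Log2_32(32) << 1) | 1 = 11, followed by the quoted type name.
// The hypothetical helper below restates just the integer encoding:
// \code
//   uint16_t encodeIntTypeInfo(unsigned BitWidth, bool IsSigned) {
//     unsigned Log2Bits = 0;
//     while ((1u << Log2Bits) < BitWidth)
//       ++Log2Bits; // floor(log2) for the power-of-two bit widths used here
//     return static_cast<uint16_t>((Log2Bits << 1) | (IsSigned ? 1 : 0));
//   }
// \endcode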
3332
3333llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3334 llvm::Type *TargetTy = IntPtrTy;
3335
3336 if (V->getType() == TargetTy)
3337 return V;
3338
3339 // Floating-point types which fit into intptr_t are bitcast to integers
3340 // and then passed directly (after zero-extension, if necessary).
3341 if (V->getType()->isFloatingPointTy()) {
3342 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3343 if (Bits <= TargetTy->getIntegerBitWidth())
3344 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3345 Bits));
3346 }
3347
3348 // Integers which fit in intptr_t are zero-extended and passed directly.
3349 if (V->getType()->isIntegerTy() &&
3350 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3351 return Builder.CreateZExt(V, TargetTy);
3352
3353 // Pointers are passed directly, everything else is passed by address.
3354 if (!V->getType()->isPointerTy()) {
3355 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3356 Builder.CreateStore(V, Ptr);
3357 V = Ptr.getPointer();
3358 }
3359 return Builder.CreatePtrToInt(V, TargetTy);
3360}
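// Summary sketch of the convention implemented by EmitCheckValue: every
// operand reaches the runtime as one pointer-sized integer. Floats that fit
// in intptr_t are bitcast and zero-extended, integers are zero-extended,
// pointers go through ptrtoint, and anything else is spilled to a stack
// temporary whose address is passed instead. A hypothetical handler-side
// view of a float operand:
// \code
//   #include <cstdint>
//   #include <cstring>
//   float decodeFloatOperand(uintptr_t V) {
//     uint32_t Bits = static_cast<uint32_t>(V); // low 32 bits hold the float
//     float F;
//     std::memcpy(&F, &Bits, sizeof(F));
//     return F;
//   }
// \endcode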
3361
3362/// Emit a representation of a SourceLocation for passing to a handler
3363/// in a sanitizer runtime library. The format for this data is:
3364/// \code
3365/// struct SourceLocation {
3366/// const char *Filename;
3367/// int32_t Line, Column;
3368/// };
3369/// \endcode
3370/// For an invalid SourceLocation, the Filename pointer is null.
3371llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3372 llvm::Constant *Filename;
3373 int Line, Column;
3374
3375 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3376 if (PLoc.isValid()) {
3377 StringRef FilenameString = PLoc.getFilename();
3378
3379 int PathComponentsToStrip =
3380 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3381 if (PathComponentsToStrip < 0) {
3382 assert(PathComponentsToStrip != INT_MIN);
3383 int PathComponentsToKeep = -PathComponentsToStrip;
3384 auto I = llvm::sys::path::rbegin(FilenameString);
3385 auto E = llvm::sys::path::rend(FilenameString);
3386 while (I != E && --PathComponentsToKeep)
3387 ++I;
3388
3389 FilenameString = FilenameString.substr(I - E);
3390 } else if (PathComponentsToStrip > 0) {
3391 auto I = llvm::sys::path::begin(FilenameString);
3392 auto E = llvm::sys::path::end(FilenameString);
3393 while (I != E && PathComponentsToStrip--)
3394 ++I;
3395
3396 if (I != E)
3397 FilenameString =
3398 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3399 else
3400 FilenameString = llvm::sys::path::filename(FilenameString);
3401 }
3402
3403 auto FilenameGV =
3404 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3405 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3406 cast<llvm::GlobalVariable>(
3407 FilenameGV.getPointer()->stripPointerCasts()));
3408 Filename = FilenameGV.getPointer();
3409 Line = PLoc.getLine();
3410 Column = PLoc.getColumn();
3411 } else {
3412 Filename = llvm::Constant::getNullValue(Int8PtrTy);
3413 Line = Column = 0;
3414 }
3415
3416 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3417 Builder.getInt32(Column)};
3418
3419 return llvm::ConstantStruct::getAnon(Data);
3420}
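// Worked example (hedged): assuming the driver flag that populates
// EmitCheckPathComponentsToStrip (-fsanitize-undefined-strip-path-components),
// a value of 1 turns "src/lib/foo.c" into "lib/foo.c", while a negative value
// keeps only that many trailing path components. The emitted constant mirrors
// this hypothetical runtime-side struct:
// \code
//   struct SanitizerSourceLocation {
//     const char *Filename; // null for an invalid SourceLocation
//     int32_t Line;
//     int32_t Column;
//   };
// \endcode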
3421
3422namespace {
3423/// Specify under what conditions this check can be recovered
3424enum class CheckRecoverableKind {
3425 /// Always terminate program execution if this check fails.
3426 Unrecoverable,
3427 /// Check supports recovering, runtime has both fatal (noreturn) and
3428 /// non-fatal handlers for this check.
3429 Recoverable,
3430 /// Runtime conditionally aborts, always need to support recovery.
3431 AlwaysRecoverable
3432};
3433}
3434
3435static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
3436 assert(Kind.countPopulation() == 1);
3437 if (Kind == SanitizerKind::Vptr)
3438 return CheckRecoverableKind::AlwaysRecoverable;
3439 else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
3440 return CheckRecoverableKind::Unrecoverable;
3441 else
3442 return CheckRecoverableKind::Recoverable;
3443}
3444
3445namespace {
3446struct SanitizerHandlerInfo {
3447 char const *const Name;
3448 unsigned Version;
3449};
3450}
3451
3452const SanitizerHandlerInfo SanitizerHandlers[] = {
3453#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3454 LIST_SANITIZER_CHECKS
3455#undef SANITIZER_CHECK
3456};
3457
3458static void emitCheckHandlerCall(CodeGenFunction &CGF,
3459 llvm::FunctionType *FnType,
3460 ArrayRef<llvm::Value *> FnArgs,
3461 SanitizerHandler CheckHandler,
3462 CheckRecoverableKind RecoverKind, bool IsFatal,
3463 llvm::BasicBlock *ContBB) {
3464 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3465 std::optional<ApplyDebugLocation> DL;
3466 if (!CGF.Builder.getCurrentDebugLocation()) {
3467 // Ensure that the call has at least an artificial debug location.
3468 DL.emplace(CGF, SourceLocation());
3469 }
3470 bool NeedsAbortSuffix =
3471 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3472 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3473 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3474 const StringRef CheckName = CheckInfo.Name;
3475 std::string FnName = "__ubsan_handle_" + CheckName.str();
3476 if (CheckInfo.Version && !MinimalRuntime)
3477 FnName += "_v" + llvm::utostr(CheckInfo.Version);
3478 if (MinimalRuntime)
3479 FnName += "_minimal";
3480 if (NeedsAbortSuffix)
3481 FnName += "_abort";
3482 bool MayReturn =
3483 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3484
3485 llvm::AttrBuilder B(CGF.getLLVMContext());
3486 if (!MayReturn) {
3487 B.addAttribute(llvm::Attribute::NoReturn)
3488 .addAttribute(llvm::Attribute::NoUnwind);
3489 }
3490 B.addUWTableAttr(llvm::UWTableKind::Default);
3491
3492 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3493 FnType, FnName,
3494 llvm::AttributeList::get(CGF.getLLVMContext(),
3495 llvm::AttributeList::FunctionIndex, B),
3496 /*Local=*/true);
3497 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3498 if (!MayReturn) {
3499 HandlerCall->setDoesNotReturn();
3500 CGF.Builder.CreateUnreachable();
3501 } else {
3502 CGF.Builder.CreateBr(ContBB);
3503 }
3504}
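// Illustrative sketch of the handler-name scheme assembled above (the helper
// is hypothetical, not part of CodeGen): a fatal "add_overflow" check with
// version 0 and the full runtime resolves to
// "__ubsan_handle_add_overflow_abort".
// \code
//   std::string makeUbsanHandlerName(llvm::StringRef CheckName, unsigned Version,
//                                    bool MinimalRuntime, bool NeedsAbortSuffix) {
//     std::string FnName = "__ubsan_handle_" + CheckName.str();
//     if (Version && !MinimalRuntime)
//       FnName += "_v" + llvm::utostr(Version);
//     if (MinimalRuntime)
//       FnName += "_minimal";
//     if (NeedsAbortSuffix)
//       FnName += "_abort";
//     return FnName;
//   }
// \endcode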
3505
3506void CodeGenFunction::EmitCheck(
3507 ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
3508 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3509 ArrayRef<llvm::Value *> DynamicArgs) {
3510 assert(IsSanitizerScope);
3511 assert(Checked.size() > 0);
3512 assert(CheckHandler >= 0 &&
3513 size_t(CheckHandler) < std::size(SanitizerHandlers));
3514 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3515
3516 llvm::Value *FatalCond = nullptr;
3517 llvm::Value *RecoverableCond = nullptr;
3518 llvm::Value *TrapCond = nullptr;
3519 for (int i = 0, n = Checked.size(); i < n; ++i) {
3520 llvm::Value *Check = Checked[i].first;
3521 // -fsanitize-trap= overrides -fsanitize-recover=.
3522 llvm::Value *&Cond =
3523 CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
3524 ? TrapCond
3525 : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
3526 ? RecoverableCond
3527 : FatalCond;
3528 Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3529 }
3530
3531 if (ClSanitizeGuardChecks) {
3532 llvm::Value *Allow =
3533 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3534 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
3535
3536 for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
3537 if (*Cond)
3538 *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
3539 }
3540 }
3541
3542 if (TrapCond)
3543 EmitTrapCheck(TrapCond, CheckHandler);
3544 if (!FatalCond && !RecoverableCond)
3545 return;
3546
3547 llvm::Value *JointCond;
3548 if (FatalCond && RecoverableCond)
3549 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3550 else
3551 JointCond = FatalCond ? FatalCond : RecoverableCond;
3552 assert(JointCond);
3553
3554 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3555 assert(SanOpts.has(Checked[0].second));
3556#ifndef NDEBUG
3557 for (int i = 1, n = Checked.size(); i < n; ++i) {
3558 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3559 "All recoverable kinds in a single check must be same!");
3560 assert(SanOpts.has(Checked[i].second));
3561 }
3562#endif
3563
3564 llvm::BasicBlock *Cont = createBasicBlock("cont");
3565 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3566 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3567 // Give a hint that we very much don't expect to execute the handler.
3568 // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
3569 llvm::MDBuilder MDHelper(getLLVMContext());
3570 llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3571 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3572 EmitBlock(Handlers);
3573
3574 // Handler functions take an i8* pointing to the (handler-specific) static
3575 // information block, followed by a sequence of intptr_t arguments
3576 // representing operand values.
3577 SmallVector<llvm::Value *, 4> Args;
3578 SmallVector<llvm::Type *, 4> ArgTypes;
3579 if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3580 Args.reserve(DynamicArgs.size() + 1);
3581 ArgTypes.reserve(DynamicArgs.size() + 1);
3582
3583 // Emit handler arguments and create handler function type.
3584 if (!StaticArgs.empty()) {
3585 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3586 auto *InfoPtr = new llvm::GlobalVariable(
3587 CGM.getModule(), Info->getType(), false,
3588 llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3589 llvm::GlobalVariable::NotThreadLocal,
3590 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3591 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3592 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3593 Args.push_back(InfoPtr);
3594 ArgTypes.push_back(Args.back()->getType());
3595 }
3596
3597 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3598 Args.push_back(EmitCheckValue(DynamicArgs[i]));
3599 ArgTypes.push_back(IntPtrTy);
3600 }
3601 }
3602
3603 llvm::FunctionType *FnType =
3604 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3605
3606 if (!FatalCond || !RecoverableCond) {
3607 // Simple case: we need to generate a single handler call, either
3608 // fatal, or non-fatal.
3609 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3610 (FatalCond != nullptr), Cont);
3611 } else {
3612 // Emit two handler calls: the first one for the set of unrecoverable
3613 // checks, and a second one for the recoverable checks.
3614 llvm::BasicBlock *NonFatalHandlerBB =
3615 createBasicBlock("non_fatal." + CheckName);
3616 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3617 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3618 EmitBlock(FatalHandlerBB);
3619 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3620 NonFatalHandlerBB);
3621 EmitBlock(NonFatalHandlerBB);
3622 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3623 Cont);
3624 }
3625
3626 EmitBlock(Cont);
3627}
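// Sketch of the bucket selection performed at the top of EmitCheck: each
// per-check condition is folded into exactly one of the trap, recoverable, or
// fatal conditions, with -fsanitize-trap taking precedence over
// -fsanitize-recover. A hypothetical standalone restatement:
// \code
//   enum class CheckBucket { Trap, Recoverable, Fatal };
//   CheckBucket classifyCheck(bool TrapEnabled, bool RecoverEnabled) {
//     if (TrapEnabled)
//       return CheckBucket::Trap;
//     return RecoverEnabled ? CheckBucket::Recoverable : CheckBucket::Fatal;
//   }
// \endcode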
3628
3629void CodeGenFunction::EmitCfiSlowPathCheck(
3630 SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
3631 llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
3632 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3633
3634 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3635 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3636
3637 llvm::MDBuilder MDHelper(getLLVMContext());
3638 llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3639 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3640
3641 EmitBlock(CheckBB);
3642
3643 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
3644
3645 llvm::CallInst *CheckCall;
3646 llvm::FunctionCallee SlowPathFn;
3647 if (WithDiag) {
3648 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3649 auto *InfoPtr =
3650 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3651 llvm::GlobalVariable::PrivateLinkage, Info);
3652 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3653 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3654
3655 SlowPathFn = CGM.getModule().getOrInsertFunction(
3656 "__cfi_slowpath_diag",
3657 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3658 false));
3659 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3660 } else {
3661 SlowPathFn = CGM.getModule().getOrInsertFunction(
3662 "__cfi_slowpath",
3663 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3664 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3665 }
3666
3667 CGM.setDSOLocal(
3668 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3669 CheckCall->setDoesNotThrow();
3670
3671 EmitBlock(Cont);
3672}
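// For reference, the cross-DSO CFI slow-path entry points called above have
// the shape below (a hedged C-level sketch matching the llvm::FunctionType
// built in this function; the real declarations live in the compiler-rt CFI
// runtime):
// \code
//   extern "C" void __cfi_slowpath(uint64_t CallSiteTypeId, void *Ptr);
//   extern "C" void __cfi_slowpath_diag(uint64_t CallSiteTypeId, void *Ptr,
//                                       void *DiagData);
// \endcode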
3673
3674// Emit a stub for __cfi_check function so that the linker knows about this
3675// symbol in LTO mode.
3676void CodeGenFunction::EmitCfiCheckStub() {
3677 llvm::Module *M = &CGM.getModule();
3678 ASTContext &C = getContext();
3679 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
3680
3681 FunctionArgList FnArgs;
3682 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
3683 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
3684 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
3685 ImplicitParamKind::Other);
3686 FnArgs.push_back(&ArgCallsiteTypeId);
3687 FnArgs.push_back(&ArgAddr);
3688 FnArgs.push_back(&ArgCFICheckFailData);
3689 const CGFunctionInfo &FI =
3690 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
3691
3692 llvm::Function *F = llvm::Function::Create(
3693 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
3694 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3695 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3696 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3697 F->setAlignment(llvm::Align(4096));
3698 CGM.setDSOLocal(F);
3699
3700 llvm::LLVMContext &Ctx = M->getContext();
3701 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3702 // CrossDSOCFI pass is not executed if there is no executable code.
3703 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
3704 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
3705 llvm::ReturnInst::Create(Ctx, nullptr, BB);
3706}
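// The weak stub emitted above corresponds to this C-level signature (a sketch
// matching the {Int64Ty, VoidPtrTy, VoidPtrTy} function type created here):
// \code
//   extern "C" void __cfi_check(uint64_t CallSiteTypeId, void *Addr,
//                               void *CFICheckFailData);
// \endcode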
3707
3708// This function is basically a switch over the CFI failure kind, which is
3709// extracted from CFICheckFailData (1st function argument). Each case is either
3710// llvm.trap or a call to one of the two runtime handlers, based on
3711// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
3712// failure kind) traps, but this should really never happen. CFICheckFailData
3713// can be nullptr if the calling module has -fsanitize-trap behavior for this
3714// check kind; in this case __cfi_check_fail traps as well.
3715void CodeGenFunction::EmitCfiCheckFail() {
3716 SanitizerScope SanScope(this);
3717 FunctionArgList Args;
3718 ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
3719 ImplicitParamKind::Other);
3720 ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
3721 ImplicitParamKind::Other);
3722 Args.push_back(&ArgData);
3723 Args.push_back(&ArgAddr);
3724
3725 const CGFunctionInfo &FI =
3726 CGM.getTypes().arrangeBuiltinFunctionDeclaration(CGM.getContext().VoidTy, Args);
3727
3728 llvm::Function *F = llvm::Function::Create(
3729 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3730 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3731
3732 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3733 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3734 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3735
3736 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3737 SourceLocation());
3738
3739 // This function is not affected by NoSanitizeList. This function does
3740 // not have a source location, but "src:*" would still apply. Revert any
3741 // changes to SanOpts made in StartFunction.
3742 SanOpts = CGM.getLangOpts().Sanitize;
3743
3744 llvm::Value *Data =
3745 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3746 CGM.getContext().VoidPtrTy, ArgData.getLocation());
3747 llvm::Value *Addr =
3748 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3749 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3750
3751 // Data == nullptr means the calling module has trap behaviour for this check.
3752 llvm::Value *DataIsNotNullPtr =
3753 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3754 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3755
3756 llvm::StructType *SourceLocationTy =
3757 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3758 llvm::StructType *CfiCheckFailDataTy =
3759 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3760
3761 llvm::Value *V = Builder.CreateConstGEP2_32(
3762 CfiCheckFailDataTy,
3763 Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
3764 0);
3765
3766 Address CheckKindAddr(V, Int8Ty, getIntAlign());
3767 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3768
3769 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3770 CGM.getLLVMContext(),
3771 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3772 llvm::Value *ValidVtable = Builder.CreateZExt(
3773 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3774 {Addr, AllVtables}),
3775 IntPtrTy);
3776
3777 const std::pair<int, SanitizerMask> CheckKinds[] = {
3778 {CFITCK_VCall, SanitizerKind::CFIVCall},
3779 {CFITCK_NVCall, SanitizerKind::CFINVCall},
3780 {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
3781 {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
3782 {CFITCK_ICall, SanitizerKind::CFIICall}};
3783
3785 for (auto CheckKindMaskPair : CheckKinds) {
3786 int Kind = CheckKindMaskPair.first;
3787 SanitizerMask Mask = CheckKindMaskPair.second;
3788 llvm::Value *Cond =
3789 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3790 if (CGM.getLangOpts().Sanitize.has(Mask))
3791 EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
3792 {Data, Addr, ValidVtable});
3793 else
3794 EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3795 }
3796
3797 FinishFunction();
3798 // The only reference to this function will be created during LTO link.
3799 // Make sure it survives until then.
3800 CGM.addUsedGlobal(F);
3801}
3802
3803void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
3804 if (SanOpts.has(SanitizerKind::Unreachable)) {
3805 SanitizerScope SanScope(this);
3806 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3807 SanitizerKind::Unreachable),
3808 SanitizerHandler::BuiltinUnreachable,
3809 EmitCheckSourceLocation(Loc), std::nullopt);
3810 }
3811 Builder.CreateUnreachable();
3812}
3813
3814void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3815 SanitizerHandler CheckHandlerID) {
3816 llvm::BasicBlock *Cont = createBasicBlock("cont");
3817
3818 // If we're optimizing, collapse all calls to trap down to just one per
3819 // check-type per function to save on code size.
3820 if ((int)TrapBBs.size() <= CheckHandlerID)
3821 TrapBBs.resize(CheckHandlerID + 1);
3822
3823 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3824
3825 if (!ClSanitizeDebugDeoptimization &&
3826 CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&
3827 (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {
3828 auto Call = TrapBB->begin();
3829 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3830
3831 Call->applyMergedLocation(Call->getDebugLoc(),
3832 Builder.getCurrentDebugLocation());
3833 Builder.CreateCondBr(Checked, Cont, TrapBB);
3834 } else {
3835 TrapBB = createBasicBlock("trap");
3836 Builder.CreateCondBr(Checked, Cont, TrapBB);
3837 EmitBlock(TrapBB);
3838
3839 llvm::CallInst *TrapCall = Builder.CreateCall(
3840 CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3841 llvm::ConstantInt::get(CGM.Int8Ty, ClSanitizeDebugDeoptimization
3842 ? TrapBB->getParent()->size()
3843 : CheckHandlerID));
3844
3845 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3846 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3847 CGM.getCodeGenOpts().TrapFuncName);
3848 TrapCall->addFnAttr(A);
3849 }
3850 TrapCall->setDoesNotReturn();
3851 TrapCall->setDoesNotThrow();
3852 Builder.CreateUnreachable();
3853 }
3854
3855 EmitBlock(Cont);
3856}
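// Sketch of how the llvm.ubsantrap immediate is chosen above: by default it
// is the SanitizerHandler ID, so identical check kinds can share one trap
// block per function; under the "ubsan-unique-traps" option every trap gets a
// distinct code derived from the current function size. Hypothetical
// restatement of the operand selection:
// \code
//   unsigned pickTrapCode(bool UniqueTraps, unsigned FunctionSize,
//                         unsigned CheckHandlerID) {
//     return UniqueTraps ? FunctionSize : CheckHandlerID;
//   }
// \endcode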
3857
3858llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3859 llvm::CallInst *TrapCall =
3860 Builder.CreateCall(CGM.getIntrinsic(IntrID));
3861
3862 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3863 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3864 CGM.getCodeGenOpts().TrapFuncName);
3865 TrapCall->addFnAttr(A);
3866 }
3867
3868 return TrapCall;
3869}
3870
3871Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
3872 LValueBaseInfo *BaseInfo,
3873 TBAAAccessInfo *TBAAInfo) {
3874 assert(E->getType()->isArrayType() &&
3875 "Array to pointer decay must have array source type!");
3876
3877 // Expressions of array type can't be bitfields or vector elements.
3878 LValue LV = EmitLValue(E);
3879 Address Addr = LV.getAddress(*this);
3880
3881 // If the array type was an incomplete type, we need to make sure
3882 // the decay ends up being the right type.
3883 llvm::Type *NewTy = ConvertType(E->getType());
3884 Addr = Addr.withElementType(NewTy);
3885
3886 // Note that VLA pointers are always decayed, so we don't need to do
3887 // anything here.
3888 if (!E->getType()->isVariableArrayType()) {
3889 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3890 "Expected pointer to array");
3891 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3892 }
3893
3894 // The result of this decay conversion points to an array element within the
3895 // base lvalue. However, since TBAA currently does not support representing
3896 // accesses to elements of member arrays, we conservatively represent accesses
3897 // to the pointee object as if it had no base lvalue specified.
3898 // TODO: Support TBAA for member arrays.
3899 QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
3900 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3901 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3902
3903 return Addr.withElementType(ConvertTypeForMem(EltType));
3904}
3905
3906/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
3907/// array to pointer, return the array subexpression.
3908static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
3909 // If this isn't just an array->pointer decay, bail out.
3910 const auto *CE = dyn_cast<CastExpr>(E);
3911 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
3912 return nullptr;
3913
3914 // If this is a decay from variable width array, bail out.
3915 const Expr *SubExpr = CE->getSubExpr();
3916 if (SubExpr->getType()->isVariableArrayType())
3917 return nullptr;
3918
3919 return SubExpr;
3920}
3921
3922static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
3923 llvm::Type *elemType,
3924 llvm::Value *ptr,
3925 ArrayRef<llvm::Value*> indices,
3926 bool inbounds,
3927 bool signedIndices,
3928 SourceLocation loc,
3929 const llvm::Twine &name = "arrayidx") {
3930 if (inbounds) {
3931 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
3932 CodeGenFunction::NotSubtraction, loc,
3933 name);
3934 } else {
3935 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
3936 }
3937}
3938
3939static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
3940 ArrayRef<llvm::Value *> indices,
3941 llvm::Type *elementType, bool inbounds,
3942 bool signedIndices, SourceLocation loc,
3943 CharUnits align,
3944 const llvm::Twine &name = "arrayidx") {
3945 if (inbounds) {
3946 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
3947 CodeGenFunction::NotSubtraction, loc,
3948 align, name);
3949 } else {
3950 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
3951 }
3952}
3953
3954static CharUnits getArrayElementAlign(CharUnits arrayAlign,
3955 llvm::Value *idx,
3956 CharUnits eltSize) {
3957 // If we have a constant index, we can use the exact offset of the
3958 // element we're accessing.
3959 if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
3960 CharUnits offset = constantIdx->getZExtValue() * eltSize;
3961 return arrayAlign.alignmentAtOffset(offset);
3962
3963 // Otherwise, use the worst-case alignment for any element.
3964 } else {
3965 return arrayAlign.alignmentOfArrayElement(eltSize);
3966 }
3967}
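// Worked example for the constant-index case above: a 16-byte-aligned array
// of 4-byte elements indexed with the constant 3 has an element offset of 12
// bytes, so the provable alignment is 4 (the largest power of two dividing
// the offset, capped by the array alignment). A hypothetical helper mirroring
// that computation:
// \code
//   uint64_t alignmentAtOffset(uint64_t Align, uint64_t Offset) {
//     return Offset ? std::min(Align, Offset & ~(Offset - 1)) : Align;
//   }
//   // alignmentAtOffset(16, 3 * 4) == 4
// \endcode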
3968
3969static QualType getFixedSizeElementType(const ASTContext &ctx,
3970 const VariableArrayType *vla) {
3971 QualType eltType;
3972 do {
3973 eltType = vla->getElementType();
3974 } while ((vla = ctx.getAsVariableArrayType(eltType)));
3975 return eltType;
3976}
3977
3978static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
3979 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
3980}
3981
3982static bool hasBPFPreserveStaticOffset(const Expr *E) {
3983 if (!E)
3984 return false;
3985 QualType PointeeType = E->getType()->getPointeeType();
3986 if (PointeeType.isNull())
3987 return false;
3988 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
3989 return hasBPFPreserveStaticOffset(BaseDecl);
3990 return false;
3991}
3992
3993// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
3994static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
3995 Address &Addr) {
3996 if (!CGF.getTarget().getTriple().isBPF())
3997 return Addr;
3998
3999 llvm::Function *Fn =
4000 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4001 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4002 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4003}
4004
4005/// Given an array base, check whether its member access belongs to a record
4006/// with preserve_access_index attribute or not.
4007static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4008 if (!ArrayBase || !CGF.getDebugInfo())
4009 return false;
4010
4011 // Only support base as either a MemberExpr or DeclRefExpr.
4012 // DeclRefExpr to cover cases like:
4013 // struct s { int a; int b[10]; };
4014 // struct s *p;
4015 // p[1].a
4016 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4017 // p->b[5] is a MemberExpr example.
4018 const Expr *E = ArrayBase->IgnoreImpCasts();
4019 if (const auto *ME = dyn_cast<MemberExpr>(E))
4020 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4021
4022 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4023 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4024 if (!VarDef)
4025 return false;
4026
4027 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4028 if (!PtrT)
4029 return false;
4030
4031 const auto *PointeeT = PtrT->getPointeeType()
4032 ->getUnqualifiedDesugaredType();
4033 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4034 return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4035 return false;
4036 }
4037
4038 return false;
4039}
4040
4041static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4042 ArrayRef<llvm::Value *> indices,
4043 QualType eltType, bool inbounds,
4044 bool signedIndices, SourceLocation loc,
4045 QualType *arrayType = nullptr,
4046 const Expr *Base = nullptr,
4047 const llvm::Twine &name = "arrayidx") {
4048 // All the indices except the last must be zero.
4049#ifndef NDEBUG
4050 for (auto *idx : indices.drop_back())
4051 assert(isa<llvm::ConstantInt>(idx) &&
4052 cast<llvm::ConstantInt>(idx)->isZero());
4053#endif
4054
4055 // Determine the element size of the statically-sized base. This is
4056 // the thing that the indices are expressed in terms of.
4057 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4058 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4059 }
4060
4061 // We can use that to compute the best alignment of the element.
4062 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4063 CharUnits eltAlign =
4064 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4065
4067 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4068
4069 llvm::Value *eltPtr;
4070 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4071 if (!LastIndex ||
4072 (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
4073 addr = emitArraySubscriptGEP(CGF, addr, indices,
4074 CGF.ConvertTypeForMem(eltType), inbounds,
4075 signedIndices, loc, eltAlign, name);
4076 return addr;
4077 } else {
4078 // Remember the original array subscript for bpf target
4079 unsigned idx = LastIndex->getZExtValue();
4080 llvm::DIType *DbgInfo = nullptr;
4081 if (arrayType)
4082 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4083 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4084 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4085 idx, DbgInfo);
4086 }
4087
4088 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4089}
4090
4091/// The offset of a field from the beginning of the record.
4092static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
4093 const FieldDecl *FD, int64_t &Offset) {
4094 ASTContext &Ctx = CGF.getContext();
4095 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4096 unsigned FieldNo = 0;
4097
4098 for (const Decl *D : RD->decls()) {
4099 if (const auto *Record = dyn_cast<RecordDecl>(D))
4100 if (getFieldOffsetInBits(CGF, Record, FD, Offset)) {
4101 Offset += Layout.getFieldOffset(FieldNo);
4102 return true;
4103 }
4104
4105 if (const auto *Field = dyn_cast<FieldDecl>(D))
4106 if (FD == Field) {
4107 Offset += Layout.getFieldOffset(FieldNo);
4108 return true;
4109 }
4110
4111 if (isa<FieldDecl>(D))
4112 ++FieldNo;
4113 }
4114
4115 return false;
4116}
4117
4118/// Returns the relative offset difference between \p FD1 and \p FD2.
4119/// \code
4120/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4121/// \endcode
4122/// Both fields must be within the same struct.
4123static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4124 const FieldDecl *FD1,
4125 const FieldDecl *FD2) {
4126 const RecordDecl *FD1OuterRec =
4127 FD1->getParent()->getOuterLexicalRecordContext();
4128 const RecordDecl *FD2OuterRec =
4129 FD2->getParent()->getOuterLexicalRecordContext();
4130
4131 if (FD1OuterRec != FD2OuterRec)
4132 // Fields must be within the same RecordDecl.
4133 return std::optional<int64_t>();
4134
4135 int64_t FD1Offset = 0;
4136 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4137 return std::optional<int64_t>();
4138
4139 int64_t FD2Offset = 0;
4140 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4141 return std::optional<int64_t>();
4142
4143 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4144}
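// Worked example (with a hypothetical struct) of the value this returns for
// the counted_by bounds check below:
// \code
//   struct S {
//     int count;
//     int other;
//     int fam[] __attribute__((counted_by(count)));
//   };
// \endcode
// Here getOffsetDifferenceInBits(CGF, count, fam) yields 0 - 64 = -64 bits,
// i.e. the count lives 8 bytes before the flexible array member, and the
// caller turns that into a byte offset for a GEP relative to the array base.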
4145
4146LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
4147 bool Accessed) {
4148 // The index must always be an integer, which is not an aggregate. Emit it
4149 // in lexical order (this complexity is, sadly, required by C++17).
4150 llvm::Value *IdxPre =
4151 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4152 bool SignedIndices = false;
4153 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4154 auto *Idx = IdxPre;
4155 if (E->getLHS() != E->getIdx()) {
4156 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4157 Idx = EmitScalarExpr(E->getIdx());
4158 }
4159
4160 QualType IdxTy = E->getIdx()->getType();
4161 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4162 SignedIndices |= IdxSigned;
4163
4164 if (SanOpts.has(SanitizerKind::ArrayBounds))
4165 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4166
4167 // Extend or truncate the index type to 32 or 64-bits.
4168 if (Promote && Idx->getType() != IntPtrTy)
4169 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4170
4171 return Idx;
4172 };
4173 IdxPre = nullptr;
4174
4175 // If the base is a vector type, then we are forming a vector element lvalue
4176 // with this subscript.
4177 if (E->getBase()->getType()->isVectorType() &&
4178 !isa<ExtVectorElementExpr>(E->getBase())) {
4179 // Emit the vector as an lvalue to get its address.
4180 LValue LHS = EmitLValue(E->getBase());
4181 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4182 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4183 return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
4184 E->getBase()->getType(), LHS.getBaseInfo(),
4185 TBAAAccessInfo());
4186 }
4187
4188 // All the other cases basically behave like simple offsetting.
4189
4190 // Handle the extvector case we ignored above.
4191 if (isa<ExtVectorElementExpr>(E->getBase())) {
4192 LValue LV = EmitLValue(E->getBase());
4193 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4194 Address Addr = LV.getAddress(*this);
4195
4196 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4197 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4198 SignedIndices, E->getExprLoc());
4199 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4200 CGM.getTBAAInfoForSubobject(LV, EltType));
4201 }
4202
4203 LValueBaseInfo EltBaseInfo;
4204 TBAAAccessInfo EltTBAAInfo;
4205 Address Addr = Address::invalid();
4206 if (const VariableArrayType *vla =
4207 getContext().getAsVariableArrayType(E->getType())) {
4208 // The base must be a pointer, which is not an aggregate. Emit
4209 // it. It needs to be emitted first in case it's what captures
4210 // the VLA bounds.
4211 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4212 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4213
4214 // The element count here is the total number of non-VLA elements.
4215 llvm::Value *numElements = getVLASize(vla).NumElts;
4216
4217 // Effectively, the multiply by the VLA size is part of the GEP.
4218 // GEP indexes are signed, and scaling an index isn't permitted to
4219 // signed-overflow, so we use the same semantics for our explicit
4220 // multiply. We suppress this if overflow is not undefined behavior.
4221 if (getLangOpts().isSignedOverflowDefined()) {
4222 Idx = Builder.CreateMul(Idx, numElements);
4223 } else {
4224 Idx = Builder.CreateNSWMul(Idx, numElements);
4225 }
4226
4227 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4228 !getLangOpts().isSignedOverflowDefined(),
4229 SignedIndices, E->getExprLoc());
4230
4231 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4232 // Indexing over an interface, as in "NSString *P; P[4];"
4233
4234 // Emit the base pointer.
4235 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4236 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4237
4238 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4239 llvm::Value *InterfaceSizeVal =
4240 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4241
4242 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4243
4244 // We don't necessarily build correct LLVM struct types for ObjC
4245 // interfaces, so we can't rely on GEP to do this scaling
4246 // correctly, so we need to cast to i8*. FIXME: is this actually
4247 // true? A lot of other things in the fragile ABI would break...
4248 llvm::Type *OrigBaseElemTy = Addr.getElementType();
4249
4250 // Do the GEP.
4251 CharUnits EltAlign =
4252 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4253 llvm::Value *EltPtr =
4254 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4255 ScaledIdx, false, SignedIndices, E->getExprLoc());
4256 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4257 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4258 // If this is A[i] where A is an array, the frontend will have decayed the
4259 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4260 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4261 // "gep x, i" here. Emit one "gep A, 0, i".
4262 assert(Array->getType()->isArrayType() &&
4263 "Array to pointer decay must have array source type!");
4264 LValue ArrayLV;
4265 // For simple multidimensional array indexing, set the 'accessed' flag for
4266 // better bounds-checking of the base expression.
4267 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4268 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4269 else
4270 ArrayLV = EmitLValue(Array);
4271 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4272
4273 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4274 // If the array being accessed has a "counted_by" attribute, generate
4275 // bounds checking code. The "count" field is at the top level of the
4276 // struct or in an anonymous struct, that's also at the top level. Future
4277 // expansions may allow the "count" to reside at any place in the struct,
4278 // but the value of "counted_by" will be a "simple" path to the count,
4279 // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4280 // similar to emit the correct GEP.
4281 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4282 getLangOpts().getStrictFlexArraysLevel();
4283
4284 if (const auto *ME = dyn_cast<MemberExpr>(Array);
4285 ME &&
4286 ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
4288 const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());
4289 if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) {
4290 if (std::optional<int64_t> Diff =
4291 getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
4292 CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);
4293
4294 // Create a GEP with a byte offset between the FAM and count and
4295 // use that to load the count value.
4297 ArrayLV.getAddress(*this), Int8PtrTy, Int8Ty);
4298
4299 llvm::Type *CountTy = ConvertType(CountFD->getType());
4300 llvm::Value *Res = Builder.CreateInBoundsGEP(
4301 Int8Ty, Addr.emitRawPointer(*this),
4302 Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
4303 Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4304 ".counted_by.load");
4305
4306 // Now emit the bounds checking.
4307 EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
4308 Array->getType(), Accessed);
4309 }
4310 }
4311 }
4312 }
4313
4314 // Propagate the alignment from the array itself to the result.
4315 QualType arrayType = Array->getType();
4316 Addr = emitArraySubscriptGEP(
4317 *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
4318 E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
4319 E->getExprLoc(), &arrayType, E->getBase());
4320 EltBaseInfo = ArrayLV.getBaseInfo();
4321 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4322 } else {
4323 // The base must be a pointer; emit it with an estimate of its alignment.
4324 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4325 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4326 QualType ptrType = E->getBase()->getType();
4327 Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
4328 !getLangOpts().isSignedOverflowDefined(),
4329 SignedIndices, E->getExprLoc(), &ptrType,
4330 E->getBase());
4331 }
4332
4333 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4334
4335 if (getLangOpts().ObjC &&
4336 getLangOpts().getGC() != LangOptions::NonGC) {
4337 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
4338 setObjCGCLValueClass(getContext(), E, LV);
4339 }
4340 return LV;
4341}
4342
4343LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
4344 assert(
4345 !E->isIncomplete() &&
4346 "incomplete matrix subscript expressions should be rejected during Sema");
4347 LValue Base = EmitLValue(E->getBase());
4348 llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
4349 llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
4350 llvm::Value *NumRows = Builder.getIntN(
4351 RowIdx->getType()->getScalarSizeInBits(),
4352 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4353 llvm::Value *FinalIdx =
4354 Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4355 return LValue::MakeMatrixElt(
4356 MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
4357 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4358}
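// Worked example of the column-major linearization above: for a matrix type
// with 4 rows, the element at row 2, column 1 maps to
// FinalIdx = 1 * 4 + 2 = 6. Hypothetical restatement:
// \code
//   unsigned linearizeColumnMajor(unsigned Row, unsigned Col, unsigned NumRows) {
//     return Col * NumRows + Row;
//   }
// \endcode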
4359
4360static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
4361 LValueBaseInfo &BaseInfo,
4362 TBAAAccessInfo &TBAAInfo,
4363 QualType BaseTy, QualType ElTy,
4364 bool IsLowerBound) {
4365 LValue BaseLVal;
4366 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4367 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4368 if (BaseTy->isArrayType()) {
4369 Address Addr = BaseLVal.getAddress(CGF);
4370 BaseInfo = BaseLVal.getBaseInfo();
4371
4372 // If the array type was an incomplete type, we need to make sure
4373 // the decay ends up being the right type.
4374 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4375 Addr = Addr.withElementType(NewTy);
4376
4377 // Note that VLA pointers are always decayed, so we don't need to do
4378 // anything here.
4379 if (!BaseTy->isVariableArrayType()) {
4380 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4381 "Expected pointer to array");
4382 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4383 }
4384
4385 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4386 }
4387 LValueBaseInfo TypeBaseInfo;
4388 TBAAAccessInfo TypeTBAAInfo;
4389 CharUnits Align =
4390 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4391 BaseInfo.mergeForCast(TypeBaseInfo);
4392 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4393 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
4394 CGF.ConvertTypeForMem(ElTy), Align);
4395 }
4396 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4397}
4398
4399LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
4400 bool IsLowerBound) {
4401
4402 assert(!E->isOpenACCArraySection() &&
4403 "OpenACC Array section codegen not implemented");
4404
4405 QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
4406 QualType ResultExprTy;
4407 if (auto *AT = getContext().getAsArrayType(BaseTy))
4408 ResultExprTy = AT->getElementType();
4409 else
4410 ResultExprTy = BaseTy->getPointeeType();
4411 llvm::Value *Idx = nullptr;
4412 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4413 // Requesting lower bound or upper bound, but without provided length and
4414 // without ':' symbol for the default length -> length = 1.
4415 // Idx = LowerBound ?: 0;
4416 if (auto *LowerBound = E->getLowerBound()) {
4417 Idx = Builder.CreateIntCast(
4418 EmitScalarExpr(LowerBound), IntPtrTy,
4419 LowerBound->getType()->hasSignedIntegerRepresentation());
4420 } else
4421 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4422 } else {
4423 // Try to emit length or lower bound as constant. If this is possible, 1
4424 // is subtracted from constant length or lower bound. Otherwise, emit LLVM
4425 // IR (LB + Len) - 1.
4426 auto &C = CGM.getContext();
4427 auto *Length = E->getLength();
4428 llvm::APSInt ConstLength;
4429 if (Length) {
4430 // Idx = LowerBound + Length - 1;
4431 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4432 ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4433 Length = nullptr;
4434 }
4435 auto *LowerBound = E->getLowerBound();
4436 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4437 if (LowerBound) {
4438 if (std::optional<llvm::APSInt> LB =
4439 LowerBound->getIntegerConstantExpr(C)) {
4440 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4441 LowerBound = nullptr;
4442 }
4443 }
4444 if (!Length)
4445 --ConstLength;
4446 else if (!LowerBound)
4447 --ConstLowerBound;
4448
4449 if (Length || LowerBound) {
4450 auto *LowerBoundVal =
4451 LowerBound
4452 ? Builder.CreateIntCast(
4453 EmitScalarExpr(LowerBound), IntPtrTy,
4454 LowerBound->getType()->hasSignedIntegerRepresentation())
4455 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4456 auto *LengthVal =
4457 Length
4458 ? Builder.CreateIntCast(
4459 EmitScalarExpr(Length), IntPtrTy,
4460 Length->getType()->hasSignedIntegerRepresentation())
4461 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4462 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4463 /*HasNUW=*/false,
4464 !getLangOpts().isSignedOverflowDefined());
4465 if (Length && LowerBound) {
4466 Idx = Builder.CreateSub(
4467 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4468 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4469 }
4470 } else
4471 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4472 } else {
4473 // Idx = ArraySize - 1;
4474 QualType ArrayTy = BaseTy->isPointerType()
4475 ? E->getBase()->IgnoreParenImpCasts()->getType()
4476 : BaseTy;
4477 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4478 Length = VAT->getSizeExpr();
4479 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4480 ConstLength = *L;
4481 Length = nullptr;
4482 }
4483 } else {
4484 auto *CAT = C.getAsConstantArrayType(ArrayTy);
4485 assert(CAT && "unexpected type for array initializer");
4486 ConstLength = CAT->getSize();
4487 }
4488 if (Length) {
4489 auto *LengthVal = Builder.CreateIntCast(
4490 EmitScalarExpr(Length), IntPtrTy,
4491 Length->getType()->hasSignedIntegerRepresentation());
4492 Idx = Builder.CreateSub(
4493 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4494 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4495 } else {
4496 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4497 --ConstLength;
4498 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4499 }
4500 }
4501 }
4502 assert(Idx);
4503
4504 Address EltPtr = Address::invalid();
4505 LValueBaseInfo BaseInfo;
4506 TBAAAccessInfo TBAAInfo;
4507 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4508 // The base must be a pointer, which is not an aggregate. Emit
4509 // it. It needs to be emitted first in case it's what captures
4510 // the VLA bounds.
4511 Address Base =
4512 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4513 BaseTy, VLA->getElementType(), IsLowerBound);
4514 // The element count here is the total number of non-VLA elements.
4515 llvm::Value *NumElements = getVLASize(VLA).NumElts;
4516
4517 // Effectively, the multiply by the VLA size is part of the GEP.
4518 // GEP indexes are signed, and scaling an index isn't permitted to
4519 // signed-overflow, so we use the same semantics for our explicit
4520 // multiply. We suppress this if overflow is not undefined behavior.
4521 if (getLangOpts().isSignedOverflowDefined())
4522 Idx = Builder.CreateMul(Idx, NumElements);
4523 else
4524 Idx = Builder.CreateNSWMul(Idx, NumElements);
4525 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4526 !getLangOpts().isSignedOverflowDefined(),
4527 /*signedIndices=*/false, E->getExprLoc());
4528 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4529 // If this is A[i] where A is an array, the frontend will have decayed the
4530 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4531 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4532 // "gep x, i" here. Emit one "gep A, 0, i".
4533 assert(Array->getType()->isArrayType() &&
4534 "Array to pointer decay must have array source type!");
4535 LValue ArrayLV;
4536 // For simple multidimensional array indexing, set the 'accessed' flag for
4537 // better bounds-checking of the base expression.
4538 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4539 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4540 else
4541 ArrayLV = EmitLValue(Array);
4542
4543 // Propagate the alignment from the array itself to the result.
4544 EltPtr = emitArraySubscriptGEP(
4545 *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
4546 ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
4547 /*signedIndices=*/false, E->getExprLoc());
4548 BaseInfo = ArrayLV.getBaseInfo();
4549 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4550 } else {
4551 Address Base =
4552 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
4553 ResultExprTy, IsLowerBound);
4554 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4555 !getLangOpts().isSignedOverflowDefined(),
4556 /*signedIndices=*/false, E->getExprLoc());
4557 }
4558
4559 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4560}
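// Worked example of the index selection above (array-section syntax): for
// 'a[2:5]' the lower bound is 2 and the length is 5, so the upper-bound
// lvalue uses Idx = 2 + 5 - 1 = 6; for 'a[2:]' with 'int a[10]' the length
// defaults to the array size and Idx = 10 - 1 = 9. Hypothetical restatement
// of the constant-folded case:
// \code
//   int64_t sectionUpperIndex(int64_t LowerBound, int64_t Length) {
//     return LowerBound + Length - 1;
//   }
// \endcode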
4561
4562LValue CodeGenFunction::
4563EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
4564 // Emit the base vector as an l-value.
4565 LValue Base;
4566
4567 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4568 if (E->isArrow()) {
4569 // If it is a pointer to a vector, emit the address and form an lvalue with
4570 // it.
4571 LValueBaseInfo BaseInfo;
4572 TBAAAccessInfo TBAAInfo;
4573 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4574 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4575 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4576 Base.getQuals().removeObjCGCAttr();
4577 } else if (E->getBase()->isGLValue()) {
4578 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
4579 // emit the base as an lvalue.
4580 assert(E->getBase()->getType()->isVectorType());
4581 Base = EmitLValue(E->getBase());
4582 } else {
4583 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
4584 assert(E->getBase()->getType()->isVectorType() &&
4585 "Result must be a vector");
4586 llvm::Value *Vec = EmitScalarExpr(E->getBase());
4587
4588 // Store the vector to memory (because LValue wants an address).
4589 Address VecMem = CreateMemTemp(E->getBase()->getType());
4590 Builder.CreateStore(Vec, VecMem);
4591 Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
4592 AlignmentSource::Decl);
4593 }
4594
4595 QualType type =
4596 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4597
4598 // Encode the element access list into a vector of unsigned indices.
4599 SmallVector<uint32_t, 4> Indices;
4600 E->getEncodedElementAccess(Indices);
4601
4602 if (Base.isSimple()) {
4603 llvm::Constant *CV =
4604 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4605 return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
4606 Base.getBaseInfo(), TBAAAccessInfo());
4607 }
4608 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4609
4610 llvm::Constant *BaseElts = Base.getExtVectorElts();
4611 SmallVector<llvm::Constant *, 4> CElts;
4612
4613 for (unsigned i = 0, e = Indices.size(); i != e; ++i)
4614 CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
4615 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4616 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4617 Base.getBaseInfo(), TBAAAccessInfo());
4618}
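// Worked example of the encoded element access handled above: with Clang's
// ext_vector_type extension, a swizzle names a list of element indices, and a
// nested swizzle composes the two lists through the aggregate-element lookup
// at the end of this function.
// \code
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   typedef float float2 __attribute__((ext_vector_type(2)));
//   float2 zy(float4 V) { return V.zy; }       // encoded indices {2, 1}
//   float2 zy2(float4 V) { return V.zyxw.xy; } // composes to {2, 1} as well
// \endcode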
4619
4620LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
4621 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
4622 EmitIgnoredExpr(E->getBase());
4623 return EmitDeclRefLValue(DRE);
4624 }
4625
4626 Expr *BaseExpr = E->getBase();
4627 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
4628 LValue BaseLV;
4629 if (E->isArrow()) {
4630 LValueBaseInfo BaseInfo;
4631 TBAAAccessInfo TBAAInfo;
4632 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4633 QualType PtrTy = BaseExpr->getType()->getPointeeType();
4634 SanitizerSet SkippedChecks;
4635 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4636 if (IsBaseCXXThis)
4637 SkippedChecks.set(SanitizerKind::Alignment, true);
4638 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4639 SkippedChecks.set(SanitizerKind::Null, true);
4640 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
4641 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4642 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4643 } else
4644 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4645
4646 NamedDecl *ND = E->getMemberDecl();
4647 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4648 LValue LV = EmitLValueForField(BaseLV, Field);
4649 setObjCGCLValueClass(getContext(), E, LV);
4650 if (getLangOpts().OpenMP) {
4651 // If the member was explicitly marked as nontemporal, mark it as
4652 // nontemporal. If the base lvalue is marked as nontemporal, mark access
4653 // to children as nontemporal too.
4654 if ((IsWrappedCXXThis(BaseExpr) &&
4655 CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
4656 BaseLV.isNontemporal())
4657 LV.setNontemporal(/*Value=*/true);
4658 }
4659 return LV;
4660 }
4661
4662 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
4663 return EmitFunctionDeclLValue(*this, E, FD);
4664
4665 llvm_unreachable("Unhandled member declaration!");
4666}
4667
4668/// Given that we are currently emitting a lambda, emit an l-value for
4669/// one of its members.
4670///
4671LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
4672 llvm::Value *ThisValue) {
4673 bool HasExplicitObjectParameter = false;
4674 if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl)) {
4675 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
4676 assert(MD->getParent()->isLambda());
4677 assert(MD->getParent() == Field->getParent());
4678 }
4679 LValue LambdaLV;
4680 if (HasExplicitObjectParameter) {
4681 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
4682 auto It = LocalDeclMap.find(D);
4683 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
4684 Address AddrOfExplicitObject = It->getSecond();
4685 if (D->getType()->isReferenceType())
4686 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
4687 AlignmentSource::Decl);
4688 else
4689 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
4690 D->getType().getNonReferenceType());
4691 } else {
4692 QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
4693 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
4694 }
4695 return EmitLValueForField(LambdaLV, Field);
4696}
4697
4698LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
4699 return EmitLValueForLambdaField(Field, CXXABIThisValue);
4700}
4701
4702/// Get the field index in the debug info. The debug info structure/union
4703/// will ignore the unnamed bitfields.
4704unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
4705 unsigned FieldIndex) {
4706 unsigned I = 0, Skipped = 0;
4707
4708 for (auto *F : Rec->getDefinition()->fields()) {
4709 if (I == FieldIndex)
4710 break;
4711 if (F->isUnnamedBitField())
4712 Skipped++;
4713 I++;
4714 }
4715
4716 return FieldIndex - Skipped;
4717}
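// Worked example for the mapping above: in
// \code
//   struct S { int a; int : 3; int b; };
// \endcode
// the field 'b' has AST field index 2, but since the unnamed bit-field is not
// described in the debug info, getDebugInfoFIndex returns 2 - 1 = 1 for it.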
4718
4719/// Get the address of a zero-sized field within a record. The resulting
4720/// address doesn't necessarily have the right type.
4721static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
4722 const FieldDecl *Field) {
4723 CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
4724 CGF.getContext().getFieldOffset(Field));
4725 if (Offset.isZero())
4726 return Base;
4727 Base = Base.withElementType(CGF.Int8Ty);
4728 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
4729}
4730
4731/// Drill down to the storage of a field without walking into
4732/// reference types.
4733///
4734/// The resulting address doesn't necessarily have the right type.
4735static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
4736 const FieldDecl *field) {
4737 if (field->isZeroSize(CGF.getContext()))
4738 return emitAddrOfZeroSizeField(CGF, base, field);
4739
4740 const RecordDecl *rec = field->getParent();
4741
4742 unsigned idx =
4743 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4744
4745 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
4746}
4747
4748static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
4749 Address addr, const FieldDecl *field) {
4750 const RecordDecl *rec = field->getParent();
4751 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
4752 base.getType(), rec->getLocation());
4753
4754 unsigned idx =
4755 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4756
4757 return CGF.Builder.CreatePreserveStructAccessIndex(
4758 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
4759}
4760
4761static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4762 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4763 if (!RD)
4764 return false;
4765
4766 if (RD->isDynamicClass())
4767 return true;
4768
4769 for (const auto &Base : RD->bases())
4770 if (hasAnyVptr(Base.getType(), Context))
4771 return true;
4772
4773 for (const FieldDecl *Field : RD->fields())
4774 if (hasAnyVptr(Field->getType(), Context))
4775 return true;
4776
4777 return false;
4778}
4779
4780LValue CodeGenFunction::EmitLValueForField(LValue base,
4781 const FieldDecl *field) {
4782 LValueBaseInfo BaseInfo = base.getBaseInfo();
4783
4784 if (field->isBitField()) {
4785 const CGRecordLayout &RL =
4786 CGM.getTypes().getCGRecordLayout(field->getParent());
4787 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
4788 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
4789 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
4790 Info.VolatileStorageSize != 0 &&
4791 field->getType()
4792 .withCVRQualifiers(base.getVRQualifiers())
4793 .isVolatileQualified();
4794 Address Addr = base.getAddress(*this);
4795 unsigned Idx = RL.getLLVMFieldNo(field);
4796 const RecordDecl *rec = field->getParent();
4797 if (hasBPFPreserveStaticOffset(rec))
4798 Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
4799 if (!UseVolatile) {
4800 if (!IsInPreservedAIRegion &&
4801 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4802 if (Idx != 0)
4803 // For structs, we GEP to the field that the record layout suggests.
4804 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
4805 } else {
4806 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
4807 getContext().getRecordType(rec), rec->getLocation());
4808 Addr = Builder.CreatePreserveStructAccessIndex(
4809 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
4810 DbgInfo);
4811 }
4812 }
4813 const unsigned SS =
4814 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
4815 // Get the access type.
4816 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
4817 Addr = Addr.withElementType(FieldIntTy);
4818 if (UseVolatile) {
4819 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
4820 if (VolatileOffset)
4821 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
4822 }
4823
4824 QualType fieldType =
4825 field->getType().withCVRQualifiers(base.getVRQualifiers());
4826 // TODO: Support TBAA for bit fields.
4827 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
4828 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
4829 TBAAAccessInfo());
4830 }
4831
4832 // Fields of may-alias structures are may-alias themselves.
4833 // FIXME: this should get propagated down through anonymous structs
4834 // and unions.
4835 QualType FieldType = field->getType();
4836 const RecordDecl *rec = field->getParent();
4837 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
4838 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
4839 TBAAAccessInfo FieldTBAAInfo;
4840 if (base.getTBAAInfo().isMayAlias() ||
4841 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
4842 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4843 } else if (rec->isUnion()) {
4844 // TODO: Support TBAA for unions.
4845 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4846 } else {
4847 // If no base type has been assigned for the base access, then try to
4848 // generate one for this base lvalue.
4849 FieldTBAAInfo = base.getTBAAInfo();
4850 if (!FieldTBAAInfo.BaseType) {
4851 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
4852 assert(!FieldTBAAInfo.Offset &&
4853 "Nonzero offset for an access with no base type!");
4854 }
4855
4856 // Adjust offset to be relative to the base type.
4857 const ASTRecordLayout &Layout =
4858 getContext().getASTRecordLayout(field->getParent());
4859 unsigned CharWidth = getContext().getCharWidth();
4860 if (FieldTBAAInfo.BaseType)
4861 FieldTBAAInfo.Offset +=
4862 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
4863
4864 // Update the final access type and size.
4865 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
4866 FieldTBAAInfo.Size =
4867 getContext().getTypeSizeInChars(FieldType).getQuantity();
4868 }
4869
4870 Address addr = base.getAddress(*this);
4871 if (hasBPFPreserveStaticOffset(rec))
4872 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
4873 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
4874 if (CGM.getCodeGenOpts().StrictVTablePointers &&
4875 ClassDef->isDynamicClass()) {
4876 // Getting to any field of a dynamic object requires stripping the dynamic
4877 // information provided by invariant.group. This is because accessing
4878 // fields may leak the real address of the dynamic object, which could
4879 // result in miscompilation when the leaked pointer is compared.
4880 auto *stripped =
4881 Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
4882 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
4883 }
4884 }
4885
4886 unsigned RecordCVR = base.getVRQualifiers();
4887 if (rec->isUnion()) {
4888 // For unions, there is no pointer adjustment.
4889 if (CGM.getCodeGenOpts().StrictVTablePointers &&
4890 hasAnyVptr(FieldType, getContext()))
4891 // Because unions can easily skip invariant.barriers, we need to add
4892 // a barrier every time a CXXRecord field with a vptr is referenced.
4893 addr = Builder.CreateLaunderInvariantGroup(addr);
4894 
4895 if (IsInPreservedAIRegion ||
4896 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4897 // Remember the original union field index
4898 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
4899 rec->getLocation());
4900 addr =
4901 Address(Builder.CreatePreserveUnionAccessIndex(
4902 addr.emitRawPointer(*this),
4903 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
4904 addr.getElementType(), addr.getAlignment());
4905 }
4906
4907 if (FieldType->isReferenceType())
4908 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
4909 } else {
4910 if (!IsInPreservedAIRegion &&
4911 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
4912 // For structs, we GEP to the field that the record layout suggests.
4913 addr = emitAddrOfFieldStorage(*this, addr, field);
4914 else
4915 // Remember the original struct field index
4916 addr = emitPreserveStructAccess(*this, base, addr, field);
4917 }
4918
4919 // If this is a reference field, load the reference right now.
4920 if (FieldType->isReferenceType()) {
4921 LValue RefLVal =
4922 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4923 if (RecordCVR & Qualifiers::Volatile)
4924 RefLVal.getQuals().addVolatile();
4925 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
4926
4927 // Qualifiers on the struct don't apply to the referencee.
4928 RecordCVR = 0;
4929 FieldType = FieldType->getPointeeType();
4930 }
4931
4932 // Make sure that the address is pointing to the right type. This is critical
4933 // for both unions and structs.
4934 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
4935
4936 if (field->hasAttr<AnnotateAttr>())
4937 addr = EmitFieldAnnotations(field, addr);
4938
4939 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4940 LV.getQuals().addCVRQualifiers(RecordCVR);
4941
4942 // __weak attribute on a field is ignored.
4943 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
4944 LV.getQuals().removeObjCGCAttr();
4945 
4946 return LV;
4947}
4948
4949LValue
4950 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
4951 const FieldDecl *Field) {
4952 QualType FieldType = Field->getType();
4953
4954 if (!FieldType->isReferenceType())
4955 return EmitLValueForField(Base, Field);
4956
4957 Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);
4958
4959 // Make sure that the address is pointing to the right type.
4960 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
4961 V = V.withElementType(llvmType);
4962
4963 // TODO: Generate TBAA information that describes this access as a structure
4964 // member access and not just an access to an object of the field's type. This
4965 // should be similar to what we do in EmitLValueForField().
4966 LValueBaseInfo BaseInfo = Base.getBaseInfo();
4967 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
4968 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
4969 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
4970 CGM.getTBAAInfoForSubobject(Base, FieldType));
4971}
4972
4973 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
4974 if (E->isFileScope()) {
4975 ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
4976 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
4977 }
4978 if (E->getType()->isVariablyModifiedType())
4979 // make sure to emit the VLA size.
4980 EmitVariablyModifiedType(E->getType());
4981 
4982 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
4983 const Expr *InitExpr = E->getInitializer();
4984 LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
4985 
4986 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
4987 /*Init*/ true);
4988
4989 // Block-scope compound literals are destroyed at the end of the enclosing
4990 // scope in C.
4991 if (!getLangOpts().CPlusPlus)
4992 if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
4993 pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
4994 E->getType(), getDestroyer(DtorKind),
4995 DtorKind & EHCleanup);
4996
4997 return Result;
4998}
4999
5000 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
5001 if (!E->isGLValue())
5002 // Initializing an aggregate temporary in C++11: T{...}.
5003 return EmitAggExprToLValue(E);
5004
5005 // An lvalue initializer list must be initializing a reference.
5006 assert(E->isTransparent() && "non-transparent glvalue init list");
5007 return EmitLValue(E->getInit(0));
5008}
5009
5010/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5011/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5012/// LValue is returned and the current block has been terminated.
5013static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5014 const Expr *Operand) {
5015 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5016 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5017 return std::nullopt;
5018 }
5019
5020 return CGF.EmitLValue(Operand);
5021}
5022
5023namespace {
5024// Handle the case where the condition is a constant evaluatable simple integer,
5025// which means we don't have to separately handle the true/false blocks.
5026std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5027 CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
5028 const Expr *condExpr = E->getCond();
5029 bool CondExprBool;
5030 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5031 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5032 if (!CondExprBool)
5033 std::swap(Live, Dead);
5034
5035 if (!CGF.ContainsLabel(Dead)) {
5036 // If the true case is live, we need to track its region.
5037 if (CondExprBool)
5038 CGF.incrementProfileCounter(E);
5039 // If it is a throw expression, we emit it and return an undefined lvalue
5040 // because it can't be used.
5041 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
5042 CGF.EmitCXXThrowExpr(ThrowExpr);
5043 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
5044 llvm::Type *Ty = CGF.UnqualPtrTy;
5045 return CGF.MakeAddrLValue(
5046 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
5047 Dead->getType());
5048 }
5049 return CGF.EmitLValue(Live);
5050 }
5051 }
5052 return std::nullopt;
5053}
5054struct ConditionalInfo {
5055 llvm::BasicBlock *lhsBlock, *rhsBlock;
5056 std::optional<LValue> LHS, RHS;
5057};
5058
5059// Create and generate the 3 blocks for a conditional operator.
5060// Leaves the 'current block' in the continuation basic block.
5061template<typename FuncTy>
5062ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
5063 const AbstractConditionalOperator *E,
5064 const FuncTy &BranchGenFunc) {
5065 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
5066 CGF.createBasicBlock("cond.false"), std::nullopt,
5067 std::nullopt};
5068 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
5069
5070 CodeGenFunction::ConditionalEvaluation eval(CGF);
5071 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
5072 CGF.getProfileCount(E));
5073
5074 // Any temporaries created here are conditional.
5075 CGF.EmitBlock(Info.lhsBlock);
5076 CGF.incrementProfileCounter(E);
5077 eval.begin(CGF);
5078 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
5079 eval.end(CGF);
5080 Info.lhsBlock = CGF.Builder.GetInsertBlock();
5081
5082 if (Info.LHS)
5083 CGF.Builder.CreateBr(endBlock);
5084
5085 // Any temporaries created here are conditional.
5086 CGF.EmitBlock(Info.rhsBlock);
5087 eval.begin(CGF);
5088 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
5089 eval.end(CGF);
5090 Info.rhsBlock = CGF.Builder.GetInsertBlock();
5091 CGF.EmitBlock(endBlock);
5092
5093 return Info;
5094}
5095} // namespace
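// Editor's illustration (not part of CGExpr.cpp): a glvalue conditional may
// have a throw-expression operand. Only the non-throw arm yields an LValue;
// the throw arm terminates its block, which is the case
// EmitLValueOrThrowExpression() and ConditionalInfo exist to handle
// (helper name invented for exposition):
#if 0 // illustrative example only, never compiled as part of this file
void illustrativeAssign(bool cond, int &x) {
  (cond ? x : throw 0) = 5; // assignment through the conditional's glvalue
}
#endif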
5096
5097 void CodeGenFunction::EmitIgnoredConditionalOperator(
5098 const AbstractConditionalOperator *E) {
5099 if (!E->isGLValue()) {
5100 // ?: here should be an aggregate.
5101 assert(hasAggregateEvaluationKind(E->getType()) &&
5102 "Unexpected conditional operator!");
5103 return (void)EmitAggExprToLValue(E);
5104 }
5105
5106 OpaqueValueMapping binding(*this, E);
5107 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
5108 return;
5109
5110 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
5111 CGF.EmitIgnoredExpr(E);
5112 return LValue{};
5113 });
5114}
5115 LValue CodeGenFunction::EmitConditionalOperatorLValue(
5116 const AbstractConditionalOperator *expr) {
5117 if (!expr->isGLValue()) {
5118 // ?: here should be an aggregate.
5119 assert(hasAggregateEvaluationKind(expr->getType()) &&
5120 "Unexpected conditional operator!");
5121 return EmitAggExprToLValue(expr);
5122 }
5123
5124 OpaqueValueMapping binding(*this, expr);
5125 if (std::optional<LValue> Res =
5126 HandleConditionalOperatorLValueSimpleCase(*this, expr))
5127 return *Res;
5128
5129 ConditionalInfo Info = EmitConditionalBlocks(
5130 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
5131 return EmitLValueOrThrowExpression(CGF, E);
5132 });
5133
5134 if ((Info.LHS && !Info.LHS->isSimple()) ||
5135 (Info.RHS && !Info.RHS->isSimple()))
5136 return EmitUnsupportedLValue(expr, "conditional operator");
5137
5138 if (Info.LHS && Info.RHS) {
5139 Address lhsAddr = Info.LHS->getAddress(*this);
5140 Address rhsAddr = Info.RHS->getAddress(*this);
5141 Address result = mergeAddressesInConditionalExpr(
5142 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
5143 Builder.GetInsertBlock(), expr->getType());
5144 AlignmentSource alignSource =
5145 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
5146 Info.RHS->getBaseInfo().getAlignmentSource());
5147 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
5148 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
5149 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
5150 TBAAInfo);
5151 } else {
5152 assert((Info.LHS || Info.RHS) &&
5153 "both operands of glvalue conditional are throw-expressions?");
5154 return Info.LHS ? *Info.LHS : *Info.RHS;
5155 }
5156}
5157
5158/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
5159/// type. If the cast is to a reference, we can have the usual lvalue result,
5160/// otherwise if a cast is needed by the code generator in an lvalue context,
5161/// then it must mean that we need the address of an aggregate in order to
5162/// access one of its members. This can happen for all the reasons that casts
5163/// are permitted with aggregate result, including noop aggregate casts, and
5164/// cast from scalar to union.
5165 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
5166 switch (E->getCastKind()) {
5167 case CK_ToVoid:
5168 case CK_BitCast:
5169 case CK_LValueToRValueBitCast:
5170 case CK_ArrayToPointerDecay:
5171 case CK_FunctionToPointerDecay:
5172 case CK_NullToMemberPointer:
5173 case CK_NullToPointer:
5174 case CK_IntegralToPointer:
5175 case CK_PointerToIntegral:
5176 case CK_PointerToBoolean:
5177 case CK_IntegralCast:
5178 case CK_BooleanToSignedIntegral:
5179 case CK_IntegralToBoolean:
5180 case CK_IntegralToFloating:
5181 case CK_FloatingToIntegral:
5182 case CK_FloatingToBoolean:
5183 case CK_FloatingCast:
5184 case CK_FloatingRealToComplex:
5185 case CK_FloatingComplexToReal:
5186 case CK_FloatingComplexToBoolean:
5187 case CK_FloatingComplexCast:
5188 case CK_FloatingComplexToIntegralComplex:
5189 case CK_IntegralRealToComplex:
5190 case CK_IntegralComplexToReal:
5191 case CK_IntegralComplexToBoolean:
5192 case CK_IntegralComplexCast:
5193 case CK_IntegralComplexToFloatingComplex:
5194 case CK_DerivedToBaseMemberPointer:
5195 case CK_BaseToDerivedMemberPointer:
5196 case CK_MemberPointerToBoolean:
5197 case CK_ReinterpretMemberPointer:
5198 case CK_AnyPointerToBlockPointerCast:
5199 case CK_ARCProduceObject:
5200 case CK_ARCConsumeObject:
5201 case CK_ARCReclaimReturnedObject:
5202 case CK_ARCExtendBlockObject:
5203 case CK_CopyAndAutoreleaseBlockObject:
5204 case CK_IntToOCLSampler:
5205 case CK_FloatingToFixedPoint:
5206 case CK_FixedPointToFloating:
5207 case CK_FixedPointCast:
5208 case CK_FixedPointToBoolean:
5209 case CK_FixedPointToIntegral:
5210 case CK_IntegralToFixedPoint:
5211 case CK_MatrixCast:
5212 case CK_HLSLVectorTruncation:
5213 case CK_HLSLArrayRValue:
5214 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5215
5216 case CK_Dependent:
5217 llvm_unreachable("dependent cast kind in IR gen!");
5218
5219 case CK_BuiltinFnToFnPtr:
5220 llvm_unreachable("builtin functions are handled elsewhere");
5221
5222 // These are never l-values; just use the aggregate emission code.
5223 case CK_NonAtomicToAtomic:
5224 case CK_AtomicToNonAtomic:
5225 return EmitAggExprToLValue(E);
5226
5227 case CK_Dynamic: {
5228 LValue LV = EmitLValue(E->getSubExpr());
5229 Address V = LV.getAddress(*this);
5230 const auto *DCE = cast<CXXDynamicCastExpr>(E);
5231 return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
5232 }
5233
5234 case CK_ConstructorConversion:
5235 case CK_UserDefinedConversion:
5236 case CK_CPointerToObjCPointerCast:
5237 case CK_BlockPointerToObjCPointerCast:
5238 case CK_LValueToRValue:
5239 return EmitLValue(E->getSubExpr());
5240
5241 case CK_NoOp: {
5242 // CK_NoOp can model a qualification conversion, which can remove an array
5243 // bound and change the IR type.
5244 // FIXME: Once pointee types are removed from IR, remove this.
5245 LValue LV = EmitLValue(E->getSubExpr());
5246 // Propagate the volatile qualifier to the LValue, if present in E.
5247 if (E->changesVolatileQualification())
5248 LV.getQuals() = E->getType().getQualifiers();
5249 if (LV.isSimple()) {
5250 Address V = LV.getAddress(*this);
5251 if (V.isValid()) {
5252 llvm::Type *T = ConvertTypeForMem(E->getType());
5253 if (V.getElementType() != T)
5254 LV.setAddress(V.withElementType(T));
5255 }
5256 }
5257 return LV;
5258 }
5259
5260 case CK_UncheckedDerivedToBase:
5261 case CK_DerivedToBase: {
5262 const auto *DerivedClassTy =
5263 E->getSubExpr()->getType()->castAs<RecordType>();
5264 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5265
5266 LValue LV = EmitLValue(E->getSubExpr());
5267 Address This = LV.getAddress(*this);
5268
5269 // Perform the derived-to-base conversion
5270 Address Base = GetAddressOfBaseClass(
5271 This, DerivedClassDecl, E->path_begin(), E->path_end(),
5272 /*NullCheckValue=*/false, E->getExprLoc());
5273
5274 // TODO: Support accesses to members of base classes in TBAA. For now, we
5275 // conservatively pretend that the complete object is of the base class
5276 // type.
5277 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
5278 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5279 }
5280 case CK_ToUnion:
5281 return EmitAggExprToLValue(E);
5282 case CK_BaseToDerived: {
5283 const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
5284 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5285
5286 LValue LV = EmitLValue(E->getSubExpr());
5287
5288 // Perform the base-to-derived conversion
5289 Address Derived = GetAddressOfDerivedClass(
5290 LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(),
5291 /*NullCheckValue=*/false);
5292
5293 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
5294 // performed and the object is not of the derived type.
5295 if (sanitizePerformTypeCheck())
5296 EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
5297 E->getType());
5298
5299 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
5300 EmitVTablePtrCheckForCast(E->getType(), Derived,
5301 /*MayBeNull=*/false, CFITCK_DerivedCast,
5302 E->getBeginLoc());
5303
5304 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
5305 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5306 }
5307 case CK_LValueBitCast: {
5308 // This must be a reinterpret_cast (or c-style equivalent).
5309 const auto *CE = cast<ExplicitCastExpr>(E);
5310
5311 CGM.EmitExplicitCastExprType(CE, this);
5312 LValue LV = EmitLValue(E->getSubExpr());
5313 Address V = LV.getAddress(*this).withElementType(
5314 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
5315
5316 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
5317 EmitVTablePtrCheckForCast(E->getType(), V,
5318 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
5319 E->getBeginLoc());
5320
5321 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5322 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5323 }
5324 case CK_AddressSpaceConversion: {
5325 LValue LV = EmitLValue(E->getSubExpr());
5326 QualType DestTy = getContext().getPointerType(E->getType());
5327 llvm::Value *V = getTargetHooks().performAddrSpaceCast(
5328 *this, LV.getPointer(*this),
5329 E->getSubExpr()->getType().getAddressSpace(),
5330 E->getType().getAddressSpace(), ConvertType(DestTy));
5331 return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
5332 LV.getAddress(*this).getAlignment()),
5333 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
5334 }
5335 case CK_ObjCObjectLValueCast: {
5336 LValue LV = EmitLValue(E->getSubExpr());
5337 Address V = LV.getAddress(*this).withElementType(ConvertType(E->getType()));
5338 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5339 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5340 }
5341 case CK_ZeroToOCLOpaqueType:
5342 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
5343
5344 case CK_VectorSplat: {
5345 // LValue results of vector splats are only supported in HLSL.
5346 if (!getLangOpts().HLSL)
5347 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5348 return EmitLValue(E->getSubExpr());
5349 }
5350 }
5351
5352 llvm_unreachable("Unhandled lvalue cast kind?");
5353}
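// Editor's illustration (not part of CGExpr.cpp): the CK_LValueBitCast case
// above corresponds to source like the function below, where a
// reinterpret_cast to a reference type yields an lvalue of a different type
// over the same storage; with -fsanitize=cfi-unrelated-cast the vtable-pointer
// check is also emitted (helper name invented for exposition):
#if 0 // illustrative example only, never compiled as part of this file
float &asFloat(unsigned &bits) {
  return reinterpret_cast<float &>(bits);
}
#endif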
5354
5358}
5359
5360LValue
5361 CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
5362 assert(OpaqueValueMapping::shouldBindAsLValue(e));
5363 
5364 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
5365 it = OpaqueLValues.find(e);
5366
5367 if (it != OpaqueLValues.end())
5368 return it->second;
5369
5370 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
5371 return EmitLValue(e->getSourceExpr());
5372}
5373
5374RValue
5375 CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
5376 assert(!OpaqueValueMapping::shouldBindAsLValue(e));
5377 
5378 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
5379 it = OpaqueRValues.find(e);
5380
5381 if (it != OpaqueRValues.end())
5382 return it->second;
5383
5384 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
5385 return EmitAnyExpr(e->getSourceExpr());
5386}
5387
5388 RValue CodeGenFunction::EmitRValueForField(LValue LV,
5389 const FieldDecl *FD,
5390 SourceLocation Loc) {
5391 QualType FT = FD->getType();
5392 LValue FieldLV = EmitLValueForField(LV, FD);
5393 switch (getEvaluationKind(FT)) {
5394 case TEK_Complex:
5395 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
5396 case TEK_Aggregate:
5397 return FieldLV.asAggregateRValue(*this);
5398 case TEK_Scalar:
5399 // This routine is used to load fields one-by-one to perform a copy, so
5400 // don't load reference fields.
5401 if (FD->getType()->isReferenceType())
5402 return RValue::get(FieldLV.getPointer(*this));
5403 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
5404 // primitive load.
5405 if (FieldLV.isBitField())
5406 return EmitLoadOfLValue(FieldLV, Loc);
5407 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
5408 }
5409 llvm_unreachable("bad evaluation kind");
5410}
5411
5412//===--------------------------------------------------------------------===//
5413// Expression Emission
5414//===--------------------------------------------------------------------===//
5415
5416 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
5417 ReturnValueSlot ReturnValue) {
5418 // Builtins never have block type.
5419 if (E->getCallee()->getType()->isBlockPointerType())
5420 return EmitBlockCallExpr(E, ReturnValue);
5421
5422 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
5423 return EmitCXXMemberCallExpr(CE, ReturnValue);
5424 
5425 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
5426 return EmitCUDAKernelCallExpr(CE, ReturnValue);
5427 
5428 // A CXXOperatorCallExpr is created even for explicit object methods, but
5429 // these should be treated like static function calls.
5430 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
5431 if (const auto *MD =
5432 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
5433 MD && MD->isImplicitObjectMemberFunction())
5434 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
5435 
5436 CGCallee callee = EmitCallee(E->getCallee());
5437
5438 if (callee.isBuiltin()) {
5439 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
5440 E, ReturnValue);
5441 }
5442
5443 if (callee.isPseudoDestructor()) {
5444 return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
5445 }
5446
5447 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
5448}
5449
5450/// Emit a CallExpr without considering whether it might be a subclass.
5451 RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
5452 ReturnValueSlot ReturnValue) {
5453 CGCallee Callee = EmitCallee(E->getCallee());
5454 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
5455}
5456
5457// Detect the unusual situation where an inline version is shadowed by a
5458// non-inline version. In that case we should pick the external one
5459// everywhere. That's GCC behavior too.
5460 static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
5461 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
5462 if (!PD->isInlineBuiltinDeclaration())
5463 return false;
5464 return true;
5465}
5466
5467 static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
5468 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
5469
5470 if (auto builtinID = FD->getBuiltinID()) {
5471 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
5472 std::string NoBuiltins = "no-builtins";
5473
5474 StringRef Ident = CGF.CGM.getMangledName(GD);
5475 std::string FDInlineName = (Ident + ".inline").str();
5476
5477 bool IsPredefinedLibFunction =
5478 CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
5479 bool HasAttributeNoBuiltin =
5480 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
5481 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
5482
5483 // When directly calling an inline builtin, call it through its mangled
5484 // name to make it clear it's not the actual builtin.
5485 if (CGF.CurFn->getName() != FDInlineName &&
5486 OnlyHasInlineBuiltinDeclaration(FD)) {
5487 llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
5488 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
5489 llvm::Module *M = Fn->getParent();
5490 llvm::Function *Clone = M->getFunction(FDInlineName);
5491 if (!Clone) {
5492 Clone = llvm::Function::Create(Fn->getFunctionType(),
5493 llvm::GlobalValue::InternalLinkage,
5494 Fn->getAddressSpace(), FDInlineName, M);
5495 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
5496 }
5497 return CGCallee::forDirect(Clone, GD);
5498 }
5499
5500 // Replaceable builtins provide their own implementation of a builtin. If we
5501 // are in an inline builtin implementation, avoid trivial infinite
5502 // recursion. Honor __attribute__((no_builtin("foo"))) or
5503 // __attribute__((no_builtin)) on the current function unless foo is
5504 // not a predefined library function, in which case we must generate the
5505 // builtin no matter what.
5506 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
5507 return CGCallee::forBuiltin(builtinID, FD);
5508 }
5509
5510 llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
5511 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
5512 FD->hasAttr<CUDAGlobalAttr>())
5513 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
5514 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
5515
5516 return CGCallee::forDirect(CalleePtr, GD);
5517}
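// Editor's illustration (not part of CGExpr.cpp): roughly the source pattern
// that triggers the ".inline" clone above -- an inline, always_inline,
// gnu_inline definition of a library builtin, as found in fortified libc
// headers. Calls to memcpy from other functions are then routed to an
// internal "memcpy.inline" copy so the wrapper cannot recurse into itself
// (a sketch under that assumption, not a definitive reproduction):
#if 0 // illustrative example only, never compiled as part of this file
extern "C" inline __attribute__((gnu_inline, always_inline)) void *
memcpy(void *dst, const void *src, __SIZE_TYPE__ n) {
  return __builtin_memcpy(dst, src, n);
}
#endif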
5518
5519 CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
5520 E = E->IgnoreParens();
5521
5522 // Look through function-to-pointer decay.
5523 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
5524 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
5525 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
5526 return EmitCallee(ICE->getSubExpr());
5527 }
5528
5529 // Resolve direct calls.
5530 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
5531 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
5532 return EmitDirectCallee(*this, FD);
5533 }
5534 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
5535 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
5536 EmitIgnoredExpr(ME->getBase());
5537 return EmitDirectCallee(*this, FD);
5538 }
5539
5540 // Look through template substitutions.
5541 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
5542 return EmitCallee(NTTP->getReplacement());
5543
5544 // Treat pseudo-destructor calls differently.
5545 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
5546 return CGCallee::forPseudoDestructor(PDE);
5547 }
5548
5549 // Otherwise, we have an indirect reference.
5550 llvm::Value *calleePtr;
5551 QualType functionType;
5552 if (auto ptrType = E->getType()->getAs<PointerType>()) {
5553 calleePtr = EmitScalarExpr(E);
5554 functionType = ptrType->getPointeeType();
5555 } else {
5556 functionType = E->getType();
5557 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
5558 }
5559 assert(functionType->isFunctionType());
5560
5561 GlobalDecl GD;
5562 if (const auto *VD =
5563 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
5564 GD = GlobalDecl(VD);
5565
5566 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
5567 CGCallee callee(calleeInfo, calleePtr);
5568 return callee;
5569}
5570
5571 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
5572 // Comma expressions just emit their LHS then their RHS as an l-value.
5573 if (E->getOpcode() == BO_Comma) {
5574 EmitIgnoredExpr(E->getLHS());
5575 EnsureInsertPoint();
5576 return EmitLValue(E->getRHS());
5577 }
5578
5579 if (E->getOpcode() == BO_PtrMemD ||
5580 E->getOpcode() == BO_PtrMemI)
5581 return EmitPointerToDataMemberBinaryExpr(E);
5582 
5583 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
5584
5585 // Note that in all of these cases, __block variables need the RHS
5586 // evaluated first just in case the variable gets moved by the RHS.
5587
5588 switch (getEvaluationKind(E->getType())) {
5589 case TEK_Scalar: {
5590 switch (E->getLHS()->getType().getObjCLifetime()) {
5591 case Qualifiers::OCL_Strong:
5592 return EmitARCStoreStrong(E, /*ignored*/ false).first;
5593 
5594 case Qualifiers::OCL_Autoreleasing:
5595 return EmitARCStoreAutoreleasing(E).first;
5596 
5597 // No reason to do any of these differently.
5598 case Qualifiers::OCL_None:
5599 case Qualifiers::OCL_ExplicitNone:
5600 case Qualifiers::OCL_Weak:
5601 break;
5602 }
5603
5604 // TODO: Can we de-duplicate this code with the corresponding code in
5605 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
5606 RValue RV;
5607 llvm::Value *Previous = nullptr;
5608 QualType SrcType = E->getRHS()->getType();
5609 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
5610 // we want to extract that value and potentially (if the bitfield sanitizer
5611 // is enabled) use it to check for an implicit conversion.
5612 if (E->getLHS()->refersToBitField()) {
5613 llvm::Value *RHS =
5614 EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
5615 RV = RValue::get(RHS);
5616 } else
5617 RV = EmitAnyExpr(E->getRHS());
5618 
5619 LValue LV = EmitLValue(E->getLHS());
5620 
5621 if (RV.isScalar())
5622 EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
5623 
5624 if (LV.isBitField()) {
5625 llvm::Value *Result = nullptr;
5626 // If bitfield sanitizers are enabled we want to use the result
5627 // to check whether a truncation or sign change has occurred.
5628 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
5629 EmitStoreThroughBitfieldLValue(RV, LV, &Result);
5630 else
5631 EmitStoreThroughBitfieldLValue(RV, LV);
5632
5633 // If the expression contained an implicit conversion, make sure
5634 // to use the value before the scalar conversion.
5635 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
5636 QualType DstType = E->getLHS()->getType();
5637 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
5638 LV.getBitFieldInfo(), E->getExprLoc());
5639 } else
5640 EmitStoreThroughLValue(RV, LV);
5641
5642 if (getLangOpts().OpenMP)
5643 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
5644 E->getLHS());
5645 return LV;
5646 }
5647
5648 case TEK_Complex:
5649 return EmitComplexAssignmentLValue(E);
5650 
5651 case TEK_Aggregate:
5652 return EmitAggExprToLValue(E);
5653 }
5654 llvm_unreachable("bad evaluation kind");
5655}
5656
5657 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
5658 RValue RV = EmitCallExpr(E);
5659
5660 if (!RV.isScalar())
5661 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5662 AlignmentSource::Decl);
5663 
5664 assert(E->getCallReturnType(getContext())->isReferenceType() &&
5665 "Can't have a scalar return unless the return type is a "
5666 "reference type!");
5667
5668 return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
5669 }
5670
5671 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
5672 // FIXME: This shouldn't require another copy.
5673 return EmitAggExprToLValue(E);
5674}
5675
5678 && "binding l-value to type which needs a temporary");
5679 AggValueSlot Slot = CreateAggTemp(E->getType());
5680 EmitCXXConstructExpr(E, Slot);
5681 return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
5682 }
5683
5684 LValue
5685 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
5686 return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType());
5687 }
5688 
5689 Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
5690 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
5691 .withElementType(ConvertType(E->getType()));
5692 }
5693 
5694 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
5695 return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
5696 AlignmentSource::Decl);
5697 }
5698
5699LValue
5701 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
5702 Slot.setExternallyDestructed();
5703 EmitAggExpr(E->getSubExpr(), Slot);
5704 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
5705 return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
5706 }
5707
5708 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
5709 RValue RV = EmitObjCMessageExpr(E);
5710 
5711 if (!RV.isScalar())
5712 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5713 AlignmentSource::Decl);
5714 
5715 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
5716 "Can't have a scalar return unless the return type is a "
5717 "reference type!");
5718
5719 return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
5720 }
5721
5722 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
5723 Address V =
5724 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
5725 return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
5726 }
5727
5728 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
5729 const ObjCIvarDecl *Ivar) {
5730 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
5731}
5732
5733llvm::Value *
5734 CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
5735 const ObjCIvarDecl *Ivar) {
5736 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
5737 QualType PointerDiffType = getContext().getPointerDiffType();
5738 return Builder.CreateZExtOrTrunc(OffsetValue,
5739 getTypes().ConvertType(PointerDiffType));
5740}
5741
5742 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
5743 llvm::Value *BaseValue,
5744 const ObjCIvarDecl *Ivar,
5745 unsigned CVRQualifiers) {
5746 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
5747 Ivar, CVRQualifiers);
5748}
5749
5750 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
5751 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
5752 llvm::Value *BaseValue = nullptr;
5753 const Expr *BaseExpr = E->getBase();
5754 Qualifiers BaseQuals;
5755 QualType ObjectTy;
5756 if (E->isArrow()) {
5757 BaseValue = EmitScalarExpr(BaseExpr);
5758 ObjectTy = BaseExpr->getType()->getPointeeType();
5759 BaseQuals = ObjectTy.getQualifiers();
5760 } else {
5761 LValue BaseLV = EmitLValue(BaseExpr);
5762 BaseValue = BaseLV.getPointer(*this);
5763 ObjectTy = BaseExpr->getType();
5764 BaseQuals = ObjectTy.getQualifiers();
5765 }
5766
5767 LValue LV =
5768 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
5769 BaseQuals.getCVRQualifiers());
5770 setObjCGCLValueClass(getContext(), E, LV);
5771 return LV;
5772}
5773
5774 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
5775 // Can only get l-value for message expression returning aggregate type.
5776 RValue RV = EmitAnyExprToTemp(E);
5777 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5779}
5780
5781RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
5782 const CallExpr *E, ReturnValueSlot ReturnValue,
5783 llvm::Value *Chain) {
5784 // Get the actual function type. The callee type will always be a pointer to
5785 // function type or a block pointer type.
5786 assert(CalleeType->isFunctionPointerType() &&
5787 "Call must have function pointer type!");
5788
5789 const Decl *TargetDecl =
5790 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
5791
5792 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
5793 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
5794 "trying to emit a call to an immediate function");
5795
5796 CalleeType = getContext().getCanonicalType(CalleeType);
5797
5798 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
5799
5800 CGCallee Callee = OrigCallee;
5801
5802 if (SanOpts.has(SanitizerKind::Function) &&
5803 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
5804 !isa<FunctionNoProtoType>(PointeeType)) {
5805 if (llvm::Constant *PrefixSig =
5806 CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
5807 SanitizerScope SanScope(this);
5808 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
5809
5810 llvm::Type *PrefixSigType = PrefixSig->getType();
5811 llvm::StructType *PrefixStructTy = llvm::StructType::get(
5812 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
5813
5814 llvm::Value *CalleePtr = Callee.getFunctionPointer();
5815
5816 // On 32-bit Arm, the low bit of a function pointer indicates whether
5817 // it's using the Arm or Thumb instruction set. The actual first
5818 // instruction lives at the same address either way, so we must clear
5819 // that low bit before using the function address to find the prefix
5820 // structure.
5821 //
5822 // This applies to both Arm and Thumb target triples, because
5823 // either one could be used in an interworking context where it
5824 // might be passed function pointers of both types.
5825 llvm::Value *AlignedCalleePtr;
5826 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
5827 llvm::Value *CalleeAddress =
5828 Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
5829 llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
5830 llvm::Value *AlignedCalleeAddress =
5831 Builder.CreateAnd(CalleeAddress, Mask);
5832 AlignedCalleePtr =
5833 Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
5834 } else {
5835 AlignedCalleePtr = CalleePtr;
5836 }
5837
5838 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
5839 llvm::Value *CalleeSigPtr =
5840 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
5841 llvm::Value *CalleeSig =
5842 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
5843 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
5844
5845 llvm::BasicBlock *Cont = createBasicBlock("cont");
5846 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
5847 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
5848
5849 EmitBlock(TypeCheck);
5850 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
5851 Int32Ty,
5852 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
5853 getPointerAlign());
5854 llvm::Value *CalleeTypeHashMatch =
5855 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
5856 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
5857 EmitCheckTypeDescriptor(CalleeType)};
5858 EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
5859 SanitizerHandler::FunctionTypeMismatch, StaticData,
5860 {CalleePtr});
5861
5862 Builder.CreateBr(Cont);
5863 EmitBlock(Cont);
5864 }
5865 }
5866
5867 const auto *FnType = cast<FunctionType>(PointeeType);
5868
5869 // If we are checking indirect calls and this call is indirect, check that the
5870 // function pointer is a member of the bit set for the function type.
5871 if (SanOpts.has(SanitizerKind::CFIICall) &&
5872 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
5873 SanitizerScope SanScope(this);
5874 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
5875
5876 llvm::Metadata *MD;
5877 if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
5878 MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
5879 else
5880 MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
5881
5882 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
5883
5884 llvm::Value *CalleePtr = Callee.getFunctionPointer();
5885 llvm::Value *TypeTest = Builder.CreateCall(
5886 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
5887
5888 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
5889 llvm::Constant *StaticData[] = {
5890 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
5891 EmitCheckSourceLocation(E->getBeginLoc()),
5892 EmitCheckTypeDescriptor(QualType(FnType, 0)),
5893 };
5894 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
5895 EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
5896 CalleePtr, StaticData);
5897 } else {
5898 EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
5899 SanitizerHandler::CFICheckFail, StaticData,
5900 {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
5901 }
5902 }
5903
5904 CallArgList Args;
5905 if (Chain)
5906 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
5907
5908 // C++17 requires that we evaluate arguments to a call using assignment syntax
5909 // right-to-left, and that we evaluate arguments to certain other operators
5910 // left-to-right. Note that we allow this to override the order dictated by
5911 // the calling convention on the MS ABI, which means that parameter
5912 // destruction order is not necessarily reverse construction order.
5913 // FIXME: Revisit this based on C++ committee response to unimplementability.
5914 EvaluationOrder Order = EvaluationOrder::Default;
5915 bool StaticOperator = false;
5916 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
5917 if (OCE->isAssignmentOp())
5918 Order = EvaluationOrder::ForceRightToLeft;
5919 else {
5920 switch (OCE->getOperator()) {
5921 case OO_LessLess:
5922 case OO_GreaterGreater:
5923 case OO_AmpAmp:
5924 case OO_PipePipe:
5925 case OO_Comma:
5926 case OO_ArrowStar:
5927 Order = EvaluationOrder::ForceLeftToRight;
5928 break;
5929 default:
5930 break;
5931 }
5932 }
5933
5934 if (const auto *MD =
5935 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
5936 MD && MD->isStatic())
5937 StaticOperator = true;
5938 }
5939
5940 auto Arguments = E->arguments();
5941 if (StaticOperator) {
5942 // If we're calling a static operator, we need to emit the object argument
5943 // and ignore it.
5944 EmitIgnoredExpr(E->getArg(0));
5945 Arguments = drop_begin(Arguments, 1);
5946 }
5947 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
5948 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
5949
5950 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
5951 Args, FnType, /*ChainCall=*/Chain);
5952
5953 // C99 6.5.2.2p6:
5954 // If the expression that denotes the called function has a type
5955 // that does not include a prototype, [the default argument
5956 // promotions are performed]. If the number of arguments does not
5957 // equal the number of parameters, the behavior is undefined. If
5958 // the function is defined with a type that includes a prototype,
5959 // and either the prototype ends with an ellipsis (, ...) or the
5960 // types of the arguments after promotion are not compatible with
5961 // the types of the parameters, the behavior is undefined. If the
5962 // function is defined with a type that does not include a
5963 // prototype, and the types of the arguments after promotion are
5964 // not compatible with those of the parameters after promotion,
5965 // the behavior is undefined [except in some trivial cases].
5966 // That is, in the general case, we should assume that a call
5967 // through an unprototyped function type works like a *non-variadic*
5968 // call. The way we make this work is to cast to the exact type
5969 // of the promoted arguments.
5970 //
5971 // Chain calls use this same code path to add the invisible chain parameter
5972 // to the function type.
5973 if (isa<FunctionNoProtoType>(FnType) || Chain) {
5974 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
5975 int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
5976 CalleeTy = CalleeTy->getPointerTo(AS);
5977
5978 llvm::Value *CalleePtr = Callee.getFunctionPointer();
5979 CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
5980 Callee.setFunctionPointer(CalleePtr);
5981 }
5982
5983 // HIP function pointer contains kernel handle when it is used in triple
5984 // chevron. The kernel stub needs to be loaded from kernel handle and used
5985 // as callee.
5986 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
5987 isa<CUDAKernelCallExpr>(E) &&
5988 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
5989 llvm::Value *Handle = Callee.getFunctionPointer();
5990 auto *Stub = Builder.CreateLoad(
5991 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
5992 Callee.setFunctionPointer(Stub);
5993 }
5994 llvm::CallBase *CallOrInvoke = nullptr;
5995 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
5996 E == MustTailCall, E->getExprLoc());
5997
5998 // Generate a function declaration DISubprogram so that it can be used
5999 // in debug info about call sites.
6000 if (CGDebugInfo *DI = getDebugInfo()) {
6001 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
6002 FunctionArgList Args;
6003 QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
6004 DI->EmitFuncDeclForCallSite(CallOrInvoke,
6005 DI->getFunctionType(CalleeDecl, ResTy, Args),
6006 CalleeDecl);
6007 }
6008 }
6009
6010 return Call;
6011}
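// Editor's illustration (not part of CGExpr.cpp): the prefix-data check
// emitted above fires for indirect calls whose static type disagrees with the
// callee's actual type, e.g. when built with -fsanitize=function
// (helper names invented for exposition):
#if 0 // illustrative example only, never compiled as part of this file
void callee(int);
void caller() {
  auto *fp = reinterpret_cast<void (*)(long)>(&callee);
  fp(1L); // type-hash mismatch -> FunctionTypeMismatch diagnostic at run time
}
#endif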
6012
6013 LValue CodeGenFunction::
6014 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
6015 Address BaseAddr = Address::invalid();
6016 if (E->getOpcode() == BO_PtrMemI) {
6017 BaseAddr = EmitPointerWithAlignment(E->getLHS());
6018 } else {
6019 BaseAddr = EmitLValue(E->getLHS()).getAddress(*this);
6020 }
6021
6022 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
6023 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
6024
6025 LValueBaseInfo BaseInfo;
6026 TBAAAccessInfo TBAAInfo;
6027 Address MemberAddr =
6028 EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
6029 &TBAAInfo);
6030
6031 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6032}
6033
6034/// Given the address of a temporary variable, produce an r-value of
6035/// its type.
6036 RValue CodeGenFunction::convertTempToRValue(Address addr,
6037 QualType type,
6038 SourceLocation loc) {
6039 LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
6040 switch (getEvaluationKind(type)) {
6041 case TEK_Complex:
6042 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
6043 case TEK_Aggregate:
6044 return lvalue.asAggregateRValue(*this);
6045 case TEK_Scalar:
6046 return RValue::get(EmitLoadOfScalar(lvalue, loc));
6047 }
6048 llvm_unreachable("bad evaluation kind");
6049}
6050
6051void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6052 assert(Val->getType()->isFPOrFPVectorTy());
6053 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6054 return;
6055
6056 llvm::MDBuilder MDHelper(getLLVMContext());
6057 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
6058
6059 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6060}
6061
6062void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6063 llvm::Type *EltTy = Val->getType()->getScalarType();
6064 if (!EltTy->isFloatTy())
6065 return;
6066
6067 if ((getLangOpts().OpenCL &&
6068 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6069 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6070 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6071 // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp
6072 //
6073 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6074 // build option allows an application to specify that single precision
6075 // floating-point divide (x/y and 1/x) and sqrt used in the program
6076 // source are correctly rounded.
6077 //
6078 // TODO: CUDA has a prec-sqrt flag
6079 SetFPAccuracy(Val, 3.0f);
6080 }
6081}
6082
6083void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6084 llvm::Type *EltTy = Val->getType()->getScalarType();
6085 if (!EltTy->isFloatTy())
6086 return;
6087
6088 if ((getLangOpts().OpenCL &&
6089 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6090 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6091 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6092 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6093 //
6094 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6095 // build option allows an application to specify that single precision
6096 // floating-point divide (x/y and 1/x) and sqrt used in the program
6097 // source are correctly rounded.
6098 //
6099 // TODO: CUDA has a prec-div flag
6100 SetFPAccuracy(Val, 2.5f);
6101 }
6102}
6103
6104namespace {
6105 struct LValueOrRValue {
6106 LValue LV;
6107 RValue RV;
6108 };
6109}
6110
6111static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
6112 const PseudoObjectExpr *E,
6113 bool forLValue,
6114 AggValueSlot slot) {
6115 SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
6116 
6117 // Find the result expression, if any.
6118 const Expr *resultExpr = E->getResultExpr();
6119 LValueOrRValue result;
6120
6121 for (PseudoObjectExpr::const_semantics_iterator
6122 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
6123 const Expr *semantic = *i;
6124
6125 // If this semantic expression is an opaque value, bind it
6126 // to the result of its source expression.
6127 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
6128 // Skip unique OVEs.
6129 if (ov->isUnique()) {
6130 assert(ov != resultExpr &&
6131 "A unique OVE cannot be used as the result expression");
6132 continue;
6133 }
6134
6135 // If this is the result expression, we may need to evaluate
6136 // directly into the slot.
6137 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
6138 OVMA opaqueData;
6139 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
6140 CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
6141 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
6142 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
6143 AlignmentSource::Decl);
6144 opaqueData = OVMA::bind(CGF, ov, LV);
6145 result.RV = slot.asRValue();
6146
6147 // Otherwise, emit as normal.
6148 } else {
6149 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
6150
6151 // If this is the result, also evaluate the result now.
6152 if (ov == resultExpr) {
6153 if (forLValue)
6154 result.LV = CGF.EmitLValue(ov);
6155 else
6156 result.RV = CGF.EmitAnyExpr(ov, slot);
6157 }
6158 }
6159
6160 opaques.push_back(opaqueData);
6161
6162 // Otherwise, if the expression is the result, evaluate it
6163 // and remember the result.
6164 } else if (semantic == resultExpr) {
6165 if (forLValue)
6166 result.LV = CGF.EmitLValue(semantic);
6167 else
6168 result.RV = CGF.EmitAnyExpr(semantic, slot);
6169
6170 // Otherwise, evaluate the expression in an ignored context.
6171 } else {
6172 CGF.EmitIgnoredExpr(semantic);
6173 }
6174 }
6175
6176 // Unbind all the opaques now.
6177 for (unsigned i = 0, e = opaques.size(); i != e; ++i)
6178 opaques[i].unbind(CGF);
6179
6180 return result;
6181}
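// Editor's illustration (not part of CGExpr.cpp): PseudoObjectExprs cover
// constructs such as MS __declspec(property) accesses (requires
// -fms-extensions/-fdeclspec); their rewritten "semantic" expressions are
// what the loop above walks, binding OpaqueValueExprs as it goes
// (type and helper names invented for exposition):
#if 0 // illustrative example only, never compiled as part of this file
struct Point {
  int getX() const { return x_; }
  void setX(int v) { x_ = v; }
  __declspec(property(get = getX, put = setX)) int x;
  int x_ = 0;
};
int usePoint() {
  Point p;
  p.x = 3;        // rewritten to p.setX(3)    -- a PseudoObjectExpr
  return p.x + 1; // rewritten to p.getX() + 1 -- another PseudoObjectExpr
}
#endif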
6182
6183 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
6184 AggValueSlot slot) {
6185 return emitPseudoObjectExpr(*this, E, false, slot).RV;
6186}
6187
6188 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
6189 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
6190}
bool isDynamicClass() const
Definition: DeclCXX.h:585
bool hasDefinition() const
Definition: DeclCXX.h:571
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1062
MSGuidDecl * getGuidDecl() const
Definition: ExprCXX.h:1108
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2820
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition: Expr.h:3011
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.cpp:1638
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition: Expr.h:2990
Expr * getCallee()
Definition: Expr.h:2970
arg_range arguments()
Definition: Expr.h:3059
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition: Expr.cpp:1590
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3483
path_iterator path_begin()
Definition: Expr.h:3553
CastKind getCastKind() const
Definition: Expr.h:3527
bool changesVolatileQualification() const
Return true if this cast changes the volatile qualification between its operand and its result.
Definition: Expr.h:3612
path_iterator path_end()
Definition: Expr.h:3554
Expr * getSubExpr()
Definition: Expr.h:3533
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition: CharUnits.h:207
llvm::MaybeAlign getAsMaybeAlign() const
getAsMaybeAlign - Returns Quantity as a valid llvm::Align or std::nullopt; beware llvm::MaybeAlign as...
Definition: CharUnits.h:194
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align; beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
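Taken together, these helpers cover most CharUnits arithmetic in codegen. A minimal sketch with purely illustrative quantities (a 16-byte-aligned base and a field at byte offset 4); real code derives both from the AST and record layout:
// Illustrative values only; not taken from this file.
CharUnits BaseAlign  = CharUnits::fromQuantity(16);
CharUnits Offset     = CharUnits::fromQuantity(4);
CharUnits FieldAlign = BaseAlign.alignmentAtOffset(Offset); // yields 4 here
llvm::Align IRAlign  = FieldAlign.getAsAlign(); // hand the alignment to LLVM IR APIs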
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
SanitizerSet SanitizeRecover
Set of sanitizer checks that are non-fatal (i.e.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:111
llvm::Value * getBasePointer() const
Definition: Address.h:170
static Address invalid()
Definition: Address.h:153
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:220
CharUnits getAlignment() const
Definition: Address.h:166
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:184
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition: Address.h:226
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:241
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
Definition: Address.h:203
Address setKnownNonNull()
Definition: Address.h:208
void setAlignment(CharUnits Value)
Definition: Address.h:168
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:158
bool isValid() const
Definition: Address.h:154
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:176
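As a point of reference, a hedged sketch of the common Address manipulations above; CGF, Ptr, Int8Ty and Int32Ty are assumed placeholders for values available in the surrounding CodeGenFunction, and the constructor shape follows clang's Address.h:
// Ptr, Int8Ty and Int32Ty are assumed; the 4-byte alignment is illustrative.
Address Src(Ptr, Int8Ty, CharUnits::fromQuantity(4));
if (Src.isValid()) {
  Address Typed = Src.withElementType(Int32Ty); // same pointer and alignment
  llvm::Value *Raw = Typed.emitRawPointer(CGF); // raw pointer, offsets applied
  (void)Raw;
}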
An aggregate value slot.
Definition: CGValue.h:512
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:580
Address getAddress() const
Definition: CGValue.h:652
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:621
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:595
RValue asRValue() const
Definition: CGValue.h:674
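For orientation, a sketch of how a slot is typically formed before evaluating an aggregate into it; Temp, Ty and E are assumed to come from the surrounding emission code:
// Temp (Address), Ty (QualType) and E (const Expr *) are assumed placeholders.
AggValueSlot Slot = AggValueSlot::forAddr(
    Temp, Ty.getQualifiers(), AggValueSlot::IsNotDestructed,
    AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
    AggValueSlot::DoesNotOverlap);
CGF.EmitAggExpr(E, Slot); // evaluate the aggregate expression into the slot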
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:824
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:305
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:292
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:203
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition: CGBuilder.h:331
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition: CGBuilder.h:241
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:412
Address CreateLaunderInvariantGroup(Address Addr)
Definition: CGBuilder.h:436
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:128
Address CreatePreserveUnionAccessIndex(Address Addr, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:428
Address CreateStripInvariantGroup(Address Addr)
Definition: CGBuilder.h:442
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:189
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
Definition: CGBuilder.h:261
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:345
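A short sketch of the usual GEP-then-load pattern these helpers support, written as it would appear inside a CodeGenFunction member; Base, Dest and FieldIdx are assumed placeholders:
// Base/Dest are Address values and FieldIdx an LLVM field number (assumed).
Address FieldAddr = Builder.CreateStructGEP(Base, FieldIdx, "field.addr");
llvm::Value *FieldVal = Builder.CreateLoad(FieldAddr, "field.val");
Builder.CreateStore(FieldVal, Dest); // alignment is carried by the Addresses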
virtual llvm::Function * getKernelStub(llvm::GlobalValue *Handle)=0
Get kernel stub by kernel handle.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:97
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:113
Abstract information about a function or function prototype.
Definition: CGCall.h:40
const GlobalDecl getCalleeDecl() const
Definition: CGCall.h:58
All available information about a concrete callee.
Definition: CGCall.h:62
CGCalleeInfo getAbstractInfo() const
Definition: CGCall.h:172
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition: CGCall.h:164
bool isPseudoDestructor() const
Definition: CGCall.h:161
static CGCallee forBuiltin(unsigned builtinID, const FunctionDecl *builtinDecl)
Definition: CGCall.h:115
unsigned getBuiltinID() const
Definition: CGCall.h:156
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:129
bool isBuiltin() const
Definition: CGCall.h:149
const FunctionDecl * getBuiltinDecl() const
Definition: CGCall.h:152
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E)
Definition: CGCall.h:123
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:55
llvm::DIType * getOrCreateStandaloneType(QualType Ty, SourceLocation Loc)
Emit standalone debug info for a type.
llvm::DIType * getOrCreateRecordType(QualType Ty, SourceLocation L)
Emit record type's standalone debug info.
CGFunctionInfo - Class to encapsulate the information about a function definition.
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, llvm::Value *ivarOffset)=0
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)=0
virtual llvm::Value * EmitIvarOffset(CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)=0
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, Address AddrWeakObj)=0
virtual Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel)=0
Get the address of a selector for the specified name and type values.
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, bool threadlocal=false)=0
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
bool isNontemporalDecl(const ValueDecl *VD) const
Checks if the VD variable is marked as nontemporal declaration in current context.
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:258
void add(RValue rvalue, QualType type)
Definition: CGCall.h:282
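Pulling a few call-related pieces together, a hedged sketch of feeding an argument list and a direct callee to EmitCall; FnPtr, GD, FnInfo, V, ArgTy and Loc are assumed inputs:
// All names on the right-hand sides below are assumed to exist in context.
CallArgList Args;
Args.add(RValue::get(V), ArgTy);                 // one scalar argument
CGCallee Callee = CGCallee::forDirect(FnPtr, CGCalleeInfo(GD));
RValue Result = CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args,
                             /*callOrInvoke=*/nullptr,
                             /*IsMustTail=*/false, Loc);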
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
RValue EmitLoadOfGlobalRegLValue(LValue LV)
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitCastLValue(const CastExpr *E)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue EmitCoawaitLValue(const CoawaitExpr *E)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue)
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignTempAlloca - This creates an alloca with the default ABI alignment of the given L...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
llvm::Value * EmitARCLoadWeakRetained(Address addr)
const LangOptions & getLangOpts() const
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
LValue EmitInitListLValue(const InitListExpr *E)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Address EmitExtVectorElementLValue(LValue V)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Upcast
Checking the operand of a cast to a base object.
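These TCK_* kinds select which sanitizer type check to emit; a minimal sketch, with LV, Ty and Loc assumed from the expression currently being emitted:
// Guarding a store destination; LV, Ty and Loc are assumed placeholders.
if (CGF.sanitizePerformTypeCheck())
  CGF.EmitTypeCheck(CodeGenFunction::TCK_Store, Loc, LV, Ty);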
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
LValue EmitCallExprLValue(const CallExpr *E)
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc)
EmitCall - Generate a call of the given function, expecting the given result type,...
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
@ ForceLeftToRight
Language semantics require left-to-right evaluation.
@ Default
No language constraints on evaluation order.
@ ForceRightToLeft
Language semantics require right-to-left evaluation.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTempWithoutCast - Create a temporary memory object of the given type, with appropriate alignment without...
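As an illustration of how such temporaries are commonly consumed (a sketch that assumes E and Ty and relies on the RawAddress-to-Address conversion):
// E (const Expr *) and Ty (QualType) are assumed placeholders.
RawAddress Temp = CGF.CreateMemTemp(Ty, "tmp.sketch");
CGF.EmitAnyExprToMem(E, Temp, Ty.getQualifiers(), /*IsInitializer=*/true);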
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
llvm::Value * EmitARCLoadWeak(Address addr)
const TargetInfo & getTarget() const
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
LValue EmitVAArgExprLValue(const VAArgExpr *E)
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
llvm::Value * EmitCountedByFieldExpr(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
LValue EmitCoyieldLValue(const CoyieldExpr *E)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
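A hedged sketch of the usual EmitCheck call shape; the condition, sanitizer mask, handler and static data below are illustrative placeholders rather than code from this file:
// Ptr, Ty and Loc are assumed; the null check itself is only an example.
llvm::Value *Ok = CGF.Builder.CreateIsNotNull(Ptr, "nonnull");
llvm::Constant *StaticData[] = {CGF.EmitCheckSourceLocation(Loc),
                                CGF.EmitCheckTypeDescriptor(Ty)};
CGF.EmitCheck(std::make_pair(Ok, SanitizerKind::Null),
              SanitizerHandler::TypeMismatch, StaticData, Ptr);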
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, llvm::Value *Index, QualType IndexType, QualType IndexedType, bool Accessed)
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitLoadOfExtVectorElementLValue(LValue V)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
LValue EmitDeclRefLValue(const DeclRefExpr *E)
const TargetCodeGenInfo & getTargetHooks() const
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
LValue EmitPredefinedLValue(const PredefinedExpr *E)
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside a preserved access index region.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
CGCallee EmitCallee(const Expr *E)
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
LValue EmitMemberExpr(const MemberExpr *E)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
CleanupKind getCleanupKind(QualType::DestructionKind kind)
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue)
Address GetAddrOfBlockDecl(const VarDecl *var)
CodeGenTypes & getTypes() const
void EmitARCInitWeak(Address addr, llvm::Value *value)
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
LValue EmitStringLiteralLValue(const StringLiteral *E)
static Destroyer destroyARCStrongPrecise
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
const FieldDecl * FindCountedByField(const FieldDecl *FD)
Find the FieldDecl specified in a FAM's "counted_by" attribute.
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond is false.
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
static Destroyer destroyARCStrongImprecise
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot())
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue)
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
LValue EmitStmtExprLValue(const StmtExpr *E)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not point to a complete object, construct an l-value with the natural ...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
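A compact sketch pairing the scalar load and store helpers through an LValue; Addr, Ty and Loc are assumed to be in scope:
// Addr (Address), Ty (QualType) and Loc (SourceLocation) are assumed.
LValue LV = CGF.MakeAddrLValue(Addr, Ty);
llvm::Value *Val = CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
CGF.EmitStoreOfScalar(Val, LV.getAddress(CGF), LV.isVolatileQualified(), Ty);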
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
This class organizes the cross-function state that is used while generating LLVM code.
ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD)
Get the address of a GUID.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if the type of an expression is a variably modified type.
Definition: CGExpr.cpp:1235
void setDSOLocal(llvm::GlobalValue *GV) const
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
CGDebugInfo * getModuleDebugInfo()
ConstantAddress GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E)
Returns a pointer to a constant global variable for the given file-scope compound literal expression.
llvm::ConstantInt * CreateCrossDsoCfiTypeId(llvm::Metadata *MD)
Generate a cross-DSO type identifier for MD.
void setTypeDescriptorInMap(QualType Ty, llvm::Constant *C)
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition: CGCXX.cpp:220
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
Address createUnnamedGlobalFrom(const VarDecl &D, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1131
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const TargetInfo & getTarget() const
llvm::Metadata * CreateMetadataIdentifierForType(QualType T)
Create a metadata identifier for the given type.
llvm::Constant * getTypeDescriptorFromMap(QualType Ty)
void addUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.used metadata.
llvm::MDNode * getTBAABaseTypeInfo(QualType QTy)
getTBAABaseTypeInfo - Get metadata that describes the given base access type.
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
llvm::Metadata * CreateMetadataIdentifierGeneralized(QualType T)
Create a metadata identifier for the generalization of the given type.
const llvm::Triple & getTriple() const
llvm::Constant * getOrCreateStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
Definition: CGDecl.cpp:243
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAInfoForSubobject(LValue Base, QualType AccessType)
getTBAAInfoForSubobject - Get TBAA information for an access with a given base lvalue.
llvm::Constant * CreateRuntimeVariable(llvm::Type *Ty, StringRef Name)
Create a new runtime global variable with the specified type and name.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ConstantAddress GetAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name=".str")
Return a pointer to a constant array for the given string literal.
ASTContext & getContext() const
ConstantAddress GetAddrOfTemplateParamObject(const TemplateParamObjectDecl *TPO)
Get the address of a template parameter object.
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
llvm::MDNode * getTBAATypeInfo(QualType QTy)
getTBAATypeInfo - Get metadata used to describe accesses to objects of the given type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
TBAAAccessInfo mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, TBAAAccessInfo InfoB)
mergeTBAAInfoForConditionalOperator - Get merged TBAA information for the purposes of conditional ope...
llvm::LLVMContext & getLLVMContext()
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
void SetLLVMFunctionAttributes(GlobalDecl GD, const CGFunctionInfo &Info, llvm::Function *F, bool IsThunk)
Set the LLVM function attributes (sext, zext, etc).
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F)
Set the LLVM function attributes which only apply to a function definition.
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
ConstantAddress GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *)
Return a pointer to a constant array for the given ObjCEncodeExpr node.
ConstantAddress GetAddrOfConstantCString(const std::string &Str, const char *GlobalName=nullptr)
Returns a pointer to a character array containing the literal and a terminating '\0' character.
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:76
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition: CGCall.cpp:1632
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:680
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
const llvm::DataLayout & getDataLayout() const
Definition: CodeGenTypes.h:104
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
Definition: CGCall.cpp:639
llvm::Type * ConvertTypeForMem(QualType T, bool ForBitField=false)
ConvertTypeForMem - Convert type T into a llvm::Type.
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:260
ConstantAddress withElementType(llvm::Type *ElemTy) const
Definition: Address.h:276
llvm::Constant * getPointer() const
Definition: Address.h:272
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
llvm::Constant * tryEmitConstantExpr(const ConstantExpr *CE)
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:352
void mergeForCast(const LValueBaseInfo &Info)
Definition: CGValue.h:173
AlignmentSource getAlignmentSource() const
Definition: CGValue.h:170
LValue - This represents an lvalue reference.
Definition: CGValue.h:181
bool isBitField() const
Definition: CGValue.h:283
bool isMatrixElt() const
Definition: CGValue.h:286
Expr * getBaseIvarExp() const
Definition: CGValue.h:335
llvm::Constant * getExtVectorElts() const
Definition: CGValue.h:417
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition: CGValue.h:486
void setObjCIvar(bool Value)
Definition: CGValue.h:301
bool isObjCArray() const
Definition: CGValue.h:303
bool isObjCStrong() const
Definition: CGValue.h:327
bool isGlobalObjCRef() const
Definition: CGValue.h:309
bool isVectorElt() const
Definition: CGValue.h:282
void setObjCArray(bool Value)
Definition: CGValue.h:304
bool isSimple() const
Definition: CGValue.h:281
bool isVolatileQualified() const
Definition: CGValue.h:288
llvm::Value * getMatrixIdx() const
Definition: CGValue.h:403
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:370
llvm::Value * getGlobalReg() const
Definition: CGValue.h:438
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:440
bool isVolatile() const
Definition: CGValue.h:331
const Qualifiers & getQuals() const
Definition: CGValue.h:341
bool isGlobalReg() const
Definition: CGValue.h:285
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:460
bool isObjCWeak() const
Definition: CGValue.h:324
unsigned getVRQualifiers() const
Definition: CGValue.h:290
void setThreadLocalRef(bool Value)
Definition: CGValue.h:313
LValue setKnownNonNull()
Definition: CGValue.h:355
bool isNonGC() const
Definition: CGValue.h:306
void setGlobalObjCRef(bool Value)
Definition: CGValue.h:310
bool isExtVectorElt() const
Definition: CGValue.h:284
llvm::Value * getVectorIdx() const
Definition: CGValue.h:390
void setNontemporal(bool Value)
Definition: CGValue.h:322
LValueBaseInfo getBaseInfo() const
Definition: CGValue.h:349
llvm::Value * getPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:361
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition: CGValue.h:318
QualType getType() const
Definition: CGValue.h:294
const CGBitFieldInfo & getBitFieldInfo() const
Definition: CGValue.h:432
bool isThreadLocalRef() const
Definition: CGValue.h:312
KnownNonNull_t isKnownNonNull() const
Definition: CGValue.h:352
TBAAAccessInfo getTBAAInfo() const
Definition: CGValue.h:338
void setNonGC(bool Value)
Definition: CGValue.h:307
Address getVectorAddress() const
Definition: CGValue.h:378
bool isNontemporal() const
Definition: CGValue.h:321
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition: CGValue.h:476
bool isObjCIvar() const
Definition: CGValue.h:300
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:450
void setAddress(Address address)
Definition: CGValue.h:375
void setBaseIvarExp(Expr *V)
Definition: CGValue.h:336
RValue asAggregateRValue(CodeGenFunction &CGF) const
Definition: CGValue.h:506
Address getExtVectorAddress() const
Definition: CGValue.h:409
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:496
Address getMatrixAddress() const
Definition: CGValue.h:395
Address getBitFieldAddress() const
Definition: CGValue.h:423
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:41
bool isScalar() const
Definition: CGValue.h:63
static RValue get(llvm::Value *V)
Definition: CGValue.h:97
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition: CGValue.h:124
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:107
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:82
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:70
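For context, a sketch of the three RValue flavors these accessors distinguish; SomeScalar, Real, Imag and TempAddr are placeholders:
// All operands below are assumed placeholders for previously emitted values.
RValue Scalar  = RValue::get(SomeScalar);         // single SSA value
RValue Complex = RValue::getComplex(Real, Imag);  // (real, imag) pair
RValue Agg     = RValue::getAggregate(TempAddr);  // result lives in memory
if (Scalar.isScalar()) {
  llvm::Value *V = Scalar.getScalarVal();
  (void)V;
}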
An abstract representation of an aligned address.
Definition: Address.h:41
RawAddress withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:99
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:76
llvm::Value * getPointer() const
Definition: Address.h:65
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:356
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition: TargetInfo.h:217
Complex values, per C99 6.2.5p11.
Definition: Type.h:3082
QualType getElementType() const
Definition: Type.h:3092
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3413
bool isFileScope() const
Definition: Expr.h:3440
const Expr * getInitializer() const
Definition: Expr.h:3436
ConstStmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:195
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1072
Represents a concrete matrix type with constant number of rows and columns.
Definition: Type.h:4163
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition: Type.h:4181
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variant...
Definition: Type.h:3243
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
Definition: DeclBase.cpp:1946
decl_range decls() const
decls_begin/decls_end - Iterate over the declarations stored in this context.
Definition: DeclBase.h:2322
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1260
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition: Expr.h:1458
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition: Expr.cpp:488
ValueDecl * getDecl()
Definition: Expr.h:1328
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition: Expr.h:1452
SourceLocation getLocation() const
Definition: Expr.h:1336
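A sketch of the DeclRefExpr queries the emitters rely on, assuming a const DeclRefExpr *DRE is in scope (illustrative name):
const ValueDecl *VD = DRE->getDecl();
if (DRE->refersToEnclosingVariableOrCapture()) {
  // Names a local captured by a block or lambda rather than a plain variable.
}
if (DRE->isNonOdrUse() == NOUR_Constant) {
  // The reference can be folded to a constant instead of loading the variable.
}
SourceLocation Loc = DRE->getLocation();
(void)VD; (void)Loc;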
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
T * getAttr() const
Definition: DeclBase.h:579
SourceLocation getLocation() const
Definition: DeclBase.h:445
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition: DeclBase.cpp:530
DeclContext * getDeclContext()
Definition: DeclBase.h:454
bool hasAttr() const
Definition: DeclBase.h:583
void ConvertArgToString(ArgumentKind Kind, intptr_t Val, StringRef Modifier, StringRef Argument, ArrayRef< ArgumentValue > PrevArgs, SmallVectorImpl< char > &Output, ArrayRef< intptr_t > QualTypeVals) const
Converts a diagnostic argument (as an intptr_t) into the string that represents it.
Definition: Diagnostic.h:880
Represents an enum.
Definition: Decl.h:3868
bool isFixed() const
Returns true if this is an Objective-C, C++11, or Microsoft-style enumeration with a fixed underlying...
Definition: Decl.h:4082
void getValueRange(llvm::APInt &Max, llvm::APInt &Min) const
Calculates the [Min,Max) values the enum can store based on the NumPositiveBits and NumNegativeBits.
Definition: Decl.cpp:4975
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Definition: Type.h:5571
EnumDecl * getDecl() const
Definition: Type.h:5578
ExplicitCastExpr - An explicit cast written in the source code.
Definition: Expr.h:3730
This represents one expression.
Definition: Expr.h:110
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition: Expr.cpp:82
bool isGLValue() const
Definition: Expr.h:280
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3086
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition: Expr.h:437
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition: Expr.cpp:3059
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3047
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3055
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known ...
bool isPRValue() const
Definition: Expr.h:278
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition: Expr.cpp:1545
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3556
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3039
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition: Expr.h:469
bool isFlexibleArrayMemberLike(ASTContext &Context, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution=false) const
Check whether this array fits the idiom of a flexible array member, depending on the value of -fstric...
Definition: Expr.cpp:206
QualType getType() const
Definition: Expr.h:142
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/ write barrier.
Definition: Expr.cpp:2970
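A compact sketch of the Expr stripping and folding entry points listed above, assuming a const Expr *E and an ASTContext &Ctx are in scope (illustrative names):
const Expr *Inner = E->IgnoreParenImpCasts();     // drop parentheses and implicit casts
Expr::EvalResult Result;
if (!Inner->HasSideEffects(Ctx) && Inner->EvaluateAsRValue(Result, Ctx)) {
  // Result.Val now holds the folded APValue.
}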
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6113
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition: Expr.cpp:4278
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition: Expr.cpp:4310
const Expr * getBase() const
Definition: Expr.h:6130
Represents a member of a struct/union/class.
Definition: Decl.h:3058
bool isBitField() const
Determines whether this field is a bitfield.
Definition: Decl.h:3149
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.cpp:4646
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition: Decl.h:3271
bool isZeroSize(const ASTContext &Ctx) const
Determine if this field is a subobject of zero size, that is, either a zero-length bit-field or a fie...
Definition: Decl.cpp:4604
FullExpr - Represents a "full-expression" node.
Definition: Expr.h:1039
const Expr * getSubExpr() const
Definition: Expr.h:1052
Represents a function declaration or definition.
Definition: Decl.h:1971
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3632
Represents a prototype with parameter type info, e.g.
Definition: Type.h:4652
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
Describes a C or C++ initializer list.
Definition: Expr.h:4847
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition: Expr.cpp:2432
const Expr * getInit(unsigned Init) const
Definition: Expr.h:4893
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:467
virtual void mangleCXXRTTI(QualType T, raw_ostream &)=0
unsigned getBlockId(const BlockDecl *BD, bool Local)
Definition: Mangle.h:84
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4686
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition: ExprCXX.h:4711
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition: ExprCXX.h:4703
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition: ExprCXX.h:4736
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2742
bool isIncomplete() const
Definition: Expr.h:2762
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3172
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition: Expr.h:3255
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why? This is only meaningful if the named memb...
Definition: Expr.h:3396
Expr * getBase() const
Definition: Expr.h:3249
bool isArrow() const
Definition: Expr.h:3356
SourceLocation getExprLoc() const LLVM_READONLY
Definition: Expr.h:3367
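A sketch of what an l-value emitter typically pulls out of a MemberExpr, assuming a const MemberExpr *ME is in scope (illustrative name):
const Expr *Base = ME->getBase();            // the object or pointer expression
const ValueDecl *Member = ME->getMemberDecl();
bool ThroughPointer = ME->isArrow();         // true for 'p->x', false for 'o.x'
SourceLocation Loc = ME->getExprLoc();
(void)Base; (void)Member; (void)ThroughPointer; (void)Loc;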
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3456
bool isObjCBOOLType(QualType T) const
Returns true if T is a typedef of BOOL in Objective-C.
Definition: NSAPI.cpp:476
This represents a decl that may have a name.
Definition: Decl.h:249
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:276
A C++ nested-name-specifier augmented with source location information.
bool containsType(SanitizerMask Mask, StringRef MangledTypeName, StringRef Category=StringRef()) const
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:410
Represents an ObjC class declaration.
Definition: DeclObjC.h:1153
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1950
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
ObjCIvarDecl * getDecl()
Definition: ExprObjC.h:579
bool isArrow() const
Definition: ExprObjC.h:587
const Expr * getBase() const
Definition: ExprObjC.h:583
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:945
const ObjCMethodDecl * getMethodDecl() const
Definition: ExprObjC.h:1356
QualType getReturnType() const
Definition: DeclObjC.h:329
Represents a class type in Objective C.
Definition: Type.h:6750
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
Selector getSelector() const
Definition: ExprObjC.h:469
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1168
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1218
bool isUnique() const
Definition: Expr.h:1226
ParenExpr - This represents a parenthesized expression, e.g.
Definition: Expr.h:2130
const Expr * getSubExpr() const
Definition: Expr.h:2145
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3135
QualType getPointeeType() const
Definition: Type.h:3145
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition: Expr.h:1986
StringRef getIdentKindName() const
Definition: Expr.h:2043
PredefinedIdentKind getIdentKind() const
Definition: Expr.h:2021
StringLiteral * getFunctionName()
Definition: Expr.h:2030
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
bool isValid() const
unsigned getLine() const
Return the presumed line number of this location.
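A sketch of recovering a user-visible location, assuming a SourceManager &SM and a SourceLocation Loc are in scope (illustrative names; SourceManager::getPresumedLoc is documented further below):
PresumedLoc PLoc = SM.getPresumedLoc(Loc);   // honors #line directives
if (PLoc.isValid()) {
  const char *File = PLoc.getFilename();
  unsigned Line = PLoc.getLine();
  unsigned Column = PLoc.getColumn();
  (void)File; (void)Line; (void)Column;
}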
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6305
semantics_iterator semantics_end()
Definition: Expr.h:6377
semantics_iterator semantics_begin()
Definition: Expr.h:6371
const Expr *const * const_semantics_iterator
Definition: Expr.h:6370
Expr * getResultExpr()
Return the result-bearing expression, or null if there is none.
Definition: Expr.h:6358
A (possibly-)qualified type.
Definition: Type.h:940
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:7439
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:1007
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:7481
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:7395
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: Type.h:1432
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition: Type.h:7556
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: Type.h:7449
QualType withCVRQualifiers(unsigned CVR) const
Definition: Type.h:1174
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition: Type.h:1530
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition: Type.h:1039
The collection of all-type qualifiers we support.
Definition: Type.h:318
unsigned getCVRQualifiers() const
Definition: Type.h:474
GC getObjCGCAttr() const
Definition: Type.h:505
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:347
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: Type.h:340
@ OCL_None
There is no lifetime qualification on this type.
Definition: Type.h:336
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: Type.h:350
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: Type.h:353
bool hasConst() const
Definition: Type.h:443
void addCVRQualifiers(unsigned mask)
Definition: Type.h:488
void removeObjCGCAttr()
Definition: Type.h:509
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition: Type.h:636
void setAddressSpace(LangAS space)
Definition: Type.h:577
bool hasVolatile() const
Definition: Type.h:453
ObjCLifetime getObjCLifetime() const
Definition: Type.h:531
void addVolatile()
Definition: Type.h:456
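A sketch of inspecting and extending qualifiers, assuming a QualType T is in scope (illustrative name):
Qualifiers Quals = T.getQualifiers();
if (Quals.hasVolatile() || T.isVolatileQualified()) {
  // Loads and stores of this value must not be elided.
}
Quals.addVolatile();                              // extend the local qualifier set
Quals.addCVRQualifiers(Qualifiers::Const);
bool IsWeak = Quals.getObjCLifetime() == Qualifiers::OCL_Weak;
(void)IsWeak;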
Represents a struct/union/class.
Definition: Decl.h:4169
field_range fields() const
Definition: Decl.h:4375
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition: Decl.h:4360
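A sketch of walking a record's fields, assuming a const RecordDecl *RD is in scope (illustrative name):
for (const FieldDecl *FD : RD->fields()) {
  if (FD->isBitField()) {
    // Accessed through CGBitFieldInfo rather than a plain field GEP.
  }
  unsigned Idx = FD->getFieldIndex();             // index as used with the record layout
  (void)Idx;
}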
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:5545
decl_type * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Definition: Redeclarable.h:204
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4383
Stmt - This represents one statement.
Definition: Stmt.h:84
StmtClass getStmtClass() const
Definition: Stmt.h:1358
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:338
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1773
bool isUnion() const
Definition: Decl.h:3791
Exposes information about the current target.
Definition: TargetInfo.h:213
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1235
virtual StringRef getABI() const
Get the ABI currently in use.
Definition: TargetInfo.h:1303
The type-property cache.
Definition: Type.cpp:4353
The base class of the type hierarchy.
Definition: Type.h:1813
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1870
bool isBlockPointerType() const
Definition: Type.h:7616
bool isVoidType() const
Definition: Type.h:7901
bool isBooleanType() const
Definition: Type.h:8029
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition: Type.cpp:2155
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition: Type.cpp:1887
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition: Type.cpp:2134
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition: Type.h:8195
bool isArrayType() const
Definition: Type.h:7674
bool isFunctionPointerType() const
Definition: Type.h:7642
bool isCountAttributedType() const
Definition: Type.cpp:683
bool isArithmeticType() const
Definition: Type.cpp:2269
bool isConstantMatrixType() const
Definition: Type.h:7732
bool isPointerType() const
Definition: Type.h:7608
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:7941
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8186
bool isReferenceType() const
Definition: Type.h:7620
bool isVariableArrayType() const
Definition: Type.h:7686
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:694
bool isExtVectorBoolType() const
Definition: Type.h:7722
bool isAnyComplexType() const
Definition: Type.h:7710
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition: Type.h:8069
bool isAtomicType() const
Definition: Type.h:7753
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2667
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition: Type.cpp:2350
bool isFunctionType() const
Definition: Type.h:7604
bool isObjCObjectPointerType() const
Definition: Type.h:7740
bool isVectorType() const
Definition: Type.h:7714
bool isFloatingType() const
Definition: Type.cpp:2237
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:8119
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition: Type.cpp:604
bool isRecordType() const
Definition: Type.h:7702
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.cpp:1874
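A sketch of the type-query pattern used throughout the emitters, assuming a QualType Ty is in scope (illustrative name):
if (Ty->isPointerType()) {
  QualType Pointee = Ty->getPointeeType();
  (void)Pointee;
}
if (const auto *RT = Ty->getAs<RecordType>()) {   // looks through sugar; may be null
  RecordDecl *RD = RT->getDecl();
  (void)RD;
}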
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2183
SourceLocation getExprLoc() const
Definition: Expr.h:2311
Expr * getSubExpr() const
Definition: Expr.h:2228
Opcode getOpcode() const
Definition: Expr.h:2223
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4667
Represent the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition: Decl.h:706
QualType getType() const
Definition: Decl.h:717
QualType getType() const
Definition: Value.cpp:234
Represents a variable declaration or definition.
Definition: Decl.h:918
TLSKind getTLSKind() const
Definition: Decl.cpp:2165
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition: Decl.cpp:2363
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition: Decl.h:1171
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition: Decl.h:944
@ TLS_None
Not a TLS variable.
Definition: Decl.h:938
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3743
Represents a GCC generic vector type.
Definition: Type.h:3965
unsigned getNumElements() const
Definition: Type.h:3980
#define INT_MIN
Definition: limits.h:51
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:140
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
@ ARCImpreciseLifetime
Definition: CGValue.h:135
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition: CGValue.h:158
@ NotKnownNonNull
Definition: Address.h:32
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const AstTypeMatcher< FunctionType > functionType
Matches FunctionType nodes.
const void * Store
Store - This opaque type encapsulates an immutable mapping from locations to values.
Definition: StoreRef.h:27
bool This(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1893
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1867
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1385
bool Cast(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1707
The JSON file list parser is used to communicate input to InstallAPI.
@ OpenCL
Definition: LangStandard.h:65
@ CPlusPlus
Definition: LangStandard.h:55
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition: Specifiers.h:151
@ SC_Register
Definition: Specifiers.h:254
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition: Specifiers.h:324
@ SD_Thread
Thread storage duration.
Definition: Specifiers.h:327
@ SD_Static
Static storage duration.
Definition: Specifiers.h:328
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition: Specifiers.h:325
@ SD_Automatic
Automatic storage duration (most local variables).
Definition: Specifiers.h:326
@ SD_Dynamic
Dynamic storage duration.
Definition: Specifiers.h:329
@ Result
The result type of a method or function.
@ Dtor_Complete
Complete object dtor.
Definition: ABI.h:35
llvm::hash_code hash_value(const CustomizableOptional< T > &O)
const FunctionProtoType * T
LangAS getLangASFromTargetAS(unsigned TargetAS)
Definition: AddressSpaces.h:86
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
@ Other
Other implicit parameter.
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition: Specifiers.h:174
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition: Specifiers.h:177
unsigned long uint64_t
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
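A conceptual sketch (not the emitter itself) of how these fields drive a bit-field read, assuming the loaded storage unit llvm::Value *Val, a CGBuilderTy &Builder, and a CGBitFieldInfo Info are in scope (illustrative names):
llvm::Value *V = Val;
if (Info.IsSigned) {
  unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
  if (HighBits)
    V = Builder.CreateShl(V, HighBits);                        // discard bits above the field
  V = Builder.CreateAShr(V, Info.StorageSize - Info.Size);     // sign-extending shift back down
} else {
  if (Info.Offset)
    V = Builder.CreateLShr(V, Info.Offset);
  V = Builder.CreateAnd(V, llvm::APInt::getLowBitsSet(Info.StorageSize, Info.Size));
}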
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
Definition: CodeGenTBAA.h:105
uint64_t Offset
Offset - The byte offset of the final access within the base one.
Definition: CodeGenTBAA.h:109
static TBAAAccessInfo getMayAliasInfo()
Definition: CodeGenTBAA.h:63
uint64_t Size
Size - The size of access, in bytes.
Definition: CodeGenTBAA.h:112
llvm::MDNode * BaseType
BaseType - The base/leading access type.
Definition: CodeGenTBAA.h:101
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition: Expr.h:609
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:168
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:159
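A sketch of the sanitizer-set queries, assuming a SanitizerSet SanOpts is in scope (illustrative name; in the language options this is the Sanitize member above):
if (SanOpts.has(SanitizerKind::Null)) {
  // A null-pointer check would be emitted before the access.
}
SanitizerSet Local = SanOpts;
Local.set(SanitizerKind::Alignment, false);       // disable one check for a specific emission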
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition: Expr.h:66