clang 23.0.0git
CGExpr.cpp
Go to the documentation of this file.
1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGObjCRuntime.h"
21#include "CGOpenMPRuntime.h"
22#include "CGRecordLayout.h"
23#include "CodeGenFunction.h"
24#include "CodeGenModule.h"
25#include "CodeGenPGO.h"
26#include "ConstantEmitter.h"
27#include "TargetInfo.h"
29#include "clang/AST/ASTLambda.h"
30#include "clang/AST/Attr.h"
31#include "clang/AST/DeclObjC.h"
32#include "clang/AST/Expr.h"
34#include "clang/AST/NSAPI.h"
39#include "clang/Basic/Module.h"
41#include "llvm/ADT/STLExtras.h"
42#include "llvm/ADT/ScopeExit.h"
43#include "llvm/ADT/StringExtras.h"
44#include "llvm/IR/Constants.h"
45#include "llvm/IR/DataLayout.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/Support/ConvertUTF.h"
51#include "llvm/Support/Endian.h"
52#include "llvm/Support/MathExtras.h"
53#include "llvm/Support/Path.h"
54#include "llvm/Support/xxhash.h"
55#include "llvm/Transforms/Utils/SanitizerStats.h"
56
57#include <numeric>
58#include <optional>
59#include <string>
60
61using namespace clang;
62using namespace CodeGen;
63
64namespace clang {
65// TODO: consider deprecating ClSanitizeGuardChecks; functionality is subsumed
66// by -fsanitize-skip-hot-cutoff
67llvm::cl::opt<bool> ClSanitizeGuardChecks(
68 "ubsan-guard-checks", llvm::cl::Optional,
69 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
70
71} // namespace clang
72
73//===--------------------------------------------------------------------===//
74// Defines for metadata
75//===--------------------------------------------------------------------===//
76
77// Those values are crucial to be the SAME as in ubsan runtime library.
79 /// An integer type.
80 TK_Integer = 0x0000,
81 /// A floating-point type.
82 TK_Float = 0x0001,
83 /// An _BitInt(N) type.
84 TK_BitInt = 0x0002,
85 /// Any other type. The value representation is unspecified.
86 TK_Unknown = 0xffff
87};
88
89//===--------------------------------------------------------------------===//
90// Miscellaneous Helper Methods
91//===--------------------------------------------------------------------===//
92
93static llvm::StringRef GetUBSanTrapForHandler(SanitizerHandler ID) {
94 switch (ID) {
95#define SANITIZER_CHECK(Enum, Name, Version, Msg) \
96 case SanitizerHandler::Enum: \
97 return Msg;
99#undef SANITIZER_CHECK
100 }
101 llvm_unreachable("unhandled switch case");
102}
103
104/// CreateTempAlloca - This creates a alloca and inserts it into the entry
105/// block.
108 const Twine &Name,
109 llvm::Value *ArraySize) {
110 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
111 Alloca->setAlignment(Align.getAsAlign());
112 return RawAddress(Alloca, Ty, Align, KnownNonNull);
113}
114
115RawAddress CodeGenFunction::MaybeCastStackAddressSpace(RawAddress Alloca,
116 LangAS DestLangAS,
117 llvm::Value *ArraySize) {
118
119 llvm::Value *V = Alloca.getPointer();
120 // Alloca always returns a pointer in alloca address space, which may
121 // be different from the type defined by the language. For example,
122 // in C++ the auto variables are in the default address space. Therefore
123 // cast alloca to the default address space when necessary.
124
125 unsigned DestAddrSpace = getContext().getTargetAddressSpace(DestLangAS);
126 if (DestAddrSpace != Alloca.getAddressSpace()) {
127 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
128 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
129 // otherwise alloca is inserted at the current insertion point of the
130 // builder.
131 if (!ArraySize)
132 Builder.SetInsertPoint(getPostAllocaInsertPoint());
133 V = performAddrSpaceCast(V, Builder.getPtrTy(DestAddrSpace));
134 }
135
136 return RawAddress(V, Alloca.getElementType(), Alloca.getAlignment(),
138}
139
141 CharUnits Align, const Twine &Name,
142 llvm::Value *ArraySize,
143 RawAddress *AllocaAddr) {
144 RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
145 if (AllocaAddr)
146 *AllocaAddr = Alloca;
147 return MaybeCastStackAddressSpace(Alloca, DestLangAS, ArraySize);
148}
149
150/// CreateTempAlloca - This creates an alloca and inserts it into the entry
151/// block if \p ArraySize is nullptr, otherwise inserts it at the current
152/// insertion point of the builder.
153llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
154 const Twine &Name,
155 llvm::Value *ArraySize) {
156 llvm::AllocaInst *Alloca;
157 if (ArraySize)
158 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
159 else
160 Alloca =
161 new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
162 ArraySize, Name, AllocaInsertPt->getIterator());
163 if (SanOpts.Mask & SanitizerKind::Address) {
164 Alloca->addAnnotationMetadata({"alloca_name_altered", Name.str()});
165 }
166 if (Allocas) {
167 Allocas->Add(Alloca);
168 }
169 return Alloca;
170}
171
172/// CreateDefaultAlignTempAlloca - This creates an alloca with the
173/// default alignment of the corresponding LLVM type, which is *not*
174/// guaranteed to be related in any way to the expected alignment of
175/// an AST type that might have been lowered to Ty.
177 const Twine &Name) {
178 CharUnits Align =
179 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
180 return CreateTempAlloca(Ty, Align, Name);
181}
182
184 const Twine &Name) {
186 return CreateTempAllocaWithoutCast(ConvertType(Ty), Align, Name, nullptr);
187}
188
190 RawAddress *Alloca) {
191 // FIXME: Should we prefer the preferred type alignment here?
192 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
193}
194
196 const Twine &Name,
197 RawAddress *Alloca) {
199 /*ArraySize=*/nullptr, Alloca);
200
201 if (Ty->isConstantMatrixType()) {
202 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
203 auto *ArrayElementTy = ArrayTy->getElementType();
204 auto ArrayElements = ArrayTy->getNumElements();
205 if (getContext().getLangOpts().HLSL) {
206 auto *VectorTy = cast<llvm::FixedVectorType>(ArrayElementTy);
207 ArrayElementTy = VectorTy->getElementType();
208 ArrayElements *= VectorTy->getNumElements();
209 }
210 auto *VectorTy = llvm::FixedVectorType::get(ArrayElementTy, ArrayElements);
211
212 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
214 }
215 return Result;
216}
217
219 CharUnits Align,
220 const Twine &Name) {
221 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
222}
223
225 const Twine &Name) {
226 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
227 Name);
228}
229
230/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
231/// expression and compare the result against zero, returning an Int1Ty value.
233 PGO->setCurrentStmt(E);
234 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
235 llvm::Value *MemPtr = EmitScalarExpr(E);
236 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
237 }
238
239 QualType BoolTy = getContext().BoolTy;
240 SourceLocation Loc = E->getExprLoc();
241 CGFPOptionsRAII FPOptsRAII(*this, E);
242 if (!E->getType()->isAnyComplexType())
243 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
244
246 Loc);
247}
248
249/// EmitIgnoredExpr - Emit code to compute the specified expression,
250/// ignoring the result.
252 if (E->isPRValue())
253 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
254
255 // if this is a bitfield-resulting conditional operator, we can special case
256 // emit this. The normal 'EmitLValue' version of this is particularly
257 // difficult to codegen for, since creating a single "LValue" for two
258 // different sized arguments here is not particularly doable.
259 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
261 if (CondOp->getObjectKind() == OK_BitField)
262 return EmitIgnoredConditionalOperator(CondOp);
263 }
264
265 // Just emit it as an l-value and drop the result.
266 EmitLValue(E);
267}
268
269/// EmitAnyExpr - Emit code to compute the specified expression which
270/// can have any type. The result is returned as an RValue struct.
271/// If this is an aggregate expression, AggSlot indicates where the
272/// result should be returned.
274 AggValueSlot aggSlot,
275 bool ignoreResult) {
276 switch (getEvaluationKind(E->getType())) {
277 case TEK_Scalar:
278 return RValue::get(EmitScalarExpr(E, ignoreResult));
279 case TEK_Complex:
280 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
281 case TEK_Aggregate:
282 if (!ignoreResult && aggSlot.isIgnored())
283 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
284 EmitAggExpr(E, aggSlot);
285 return aggSlot.asRValue();
286 }
287 llvm_unreachable("bad evaluation kind");
288}
289
290/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
291/// always be accessible even if no aggregate location is provided.
294
296 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
297 return EmitAnyExpr(E, AggSlot);
298}
299
300/// EmitAnyExprToMem - Evaluate an expression into a given memory
301/// location.
303 Address Location,
304 Qualifiers Quals,
305 bool IsInit) {
306 // FIXME: This function should take an LValue as an argument.
307 switch (getEvaluationKind(E->getType())) {
308 case TEK_Complex:
310 /*isInit*/ false);
311 return;
312
313 case TEK_Aggregate: {
314 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
319 return;
320 }
321
322 case TEK_Scalar: {
323 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
324 LValue LV = MakeAddrLValue(Location, E->getType());
326 return;
327 }
328 }
329 llvm_unreachable("bad evaluation kind");
330}
331
333 const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
334 QualType Type = LV.getType();
335 switch (getEvaluationKind(Type)) {
336 case TEK_Complex:
337 EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
338 return;
339 case TEK_Aggregate:
343 AggValueSlot::MayOverlap, IsZeroed));
344 return;
345 case TEK_Scalar:
346 if (LV.isSimple())
347 EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
348 else
350 return;
351 }
352 llvm_unreachable("bad evaluation kind");
353}
354
355static void
357 const Expr *E, Address ReferenceTemporary) {
358 // Objective-C++ ARC:
359 // If we are binding a reference to a temporary that has ownership, we
360 // need to perform retain/release operations on the temporary.
361 //
362 // FIXME: This should be looking at E, not M.
363 if (auto Lifetime = M->getType().getObjCLifetime()) {
364 switch (Lifetime) {
367 // Carry on to normal cleanup handling.
368 break;
369
371 // Nothing to do; cleaned up by an autorelease pool.
372 return;
373
376 switch (StorageDuration Duration = M->getStorageDuration()) {
377 case SD_Static:
378 // Note: we intentionally do not register a cleanup to release
379 // the object on program termination.
380 return;
381
382 case SD_Thread:
383 // FIXME: We should probably register a cleanup in this case.
384 return;
385
386 case SD_Automatic:
390 if (Lifetime == Qualifiers::OCL_Strong) {
391 const ValueDecl *VD = M->getExtendingDecl();
392 bool Precise = isa_and_nonnull<VarDecl>(VD) &&
393 VD->hasAttr<ObjCPreciseLifetimeAttr>();
397 } else {
398 // __weak objects always get EH cleanups; otherwise, exceptions
399 // could cause really nasty crashes instead of mere leaks.
402 }
403 if (Duration == SD_FullExpression)
404 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
405 M->getType(), *Destroy,
407 else
408 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
409 M->getType(),
410 *Destroy, CleanupKind & EHCleanup);
411 return;
412
413 case SD_Dynamic:
414 llvm_unreachable("temporary cannot have dynamic storage duration");
415 }
416 llvm_unreachable("unknown storage duration");
417 }
418 }
419
421 if (DK != QualType::DK_none) {
422 switch (M->getStorageDuration()) {
423 case SD_Static:
424 case SD_Thread: {
425 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
426 if (const auto *ClassDecl =
428 ClassDecl && !ClassDecl->hasTrivialDestructor())
429 // Get the destructor for the reference temporary.
430 ReferenceTemporaryDtor = ClassDecl->getDestructor();
431
432 if (!ReferenceTemporaryDtor)
433 return;
434
435 llvm::FunctionCallee CleanupFn;
436 llvm::Constant *CleanupArg;
437 if (E->getType()->isArrayType()) {
439 ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject,
440 CGF.getLangOpts().Exceptions,
441 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
442 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
443 } else {
444 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
445 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
446 CleanupArg =
447 cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
448 }
450 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
451 } break;
453 CGF.pushDestroy(DK, ReferenceTemporary, E->getType());
454 break;
455 case SD_Automatic:
456 CGF.pushLifetimeExtendedDestroy(DK, ReferenceTemporary, E->getType());
457 break;
458 case SD_Dynamic:
459 llvm_unreachable("temporary cannot have dynamic storage duration");
460 }
461 }
462}
463
466 const Expr *Inner,
467 RawAddress *Alloca = nullptr) {
468 switch (M->getStorageDuration()) {
470 case SD_Automatic: {
471 // If we have a constant temporary array or record try to promote it into a
472 // constant global under the same rules a normal constant would've been
473 // promoted. This is easier on the optimizer and generally emits fewer
474 // instructions.
475 QualType Ty = Inner->getType();
476 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
477 (Ty->isArrayType() || Ty->isRecordType()) &&
478 Ty.isConstantStorage(CGF.getContext(), true, false))
479 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
480 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
481 auto *GV = new llvm::GlobalVariable(
482 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
483 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
484 llvm::GlobalValue::NotThreadLocal,
486 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
487 GV->setAlignment(alignment.getAsAlign());
488 llvm::Constant *C = GV;
489 if (AS != LangAS::Default)
491 GV, llvm::PointerType::get(
492 CGF.getLLVMContext(),
494 // FIXME: Should we put the new global into a COMDAT?
495 return RawAddress(C, GV->getValueType(), alignment);
496 }
497 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
498 }
499 case SD_Thread:
500 case SD_Static:
501 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
502
503 case SD_Dynamic:
504 llvm_unreachable("temporary can't have dynamic storage duration");
505 }
506 llvm_unreachable("unknown storage duration");
507}
508
509/// Helper method to check if the underlying ABI is AAPCS
510static bool isAAPCS(const TargetInfo &TargetInfo) {
511 return TargetInfo.getABI().starts_with("aapcs");
512}
513
516 const Expr *E = M->getSubExpr();
517
518 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
519 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
520 "Reference should never be pseudo-strong!");
521
522 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
523 // as that will cause the lifetime adjustment to be lost for ARC
524 auto ownership = M->getType().getObjCLifetime();
525 if (ownership != Qualifiers::OCL_None &&
526 ownership != Qualifiers::OCL_ExplicitNone) {
527 RawAddress Object = createReferenceTemporary(*this, M, E);
528 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
529 llvm::Type *Ty = ConvertTypeForMem(E->getType());
530 Object = Object.withElementType(Ty);
531
532 // createReferenceTemporary will promote the temporary to a global with a
533 // constant initializer if it can. It can only do this to a value of
534 // ARC-manageable type if the value is global and therefore "immune" to
535 // ref-counting operations. Therefore we have no need to emit either a
536 // dynamic initialization or a cleanup and we can just return the address
537 // of the temporary.
538 if (Var->hasInitializer())
539 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
540
541 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
542 }
543 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
545
546 switch (getEvaluationKind(E->getType())) {
547 default: llvm_unreachable("expected scalar or aggregate expression");
548 case TEK_Scalar:
549 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
550 break;
551 case TEK_Aggregate: {
553 E->getType().getQualifiers(),
558 break;
559 }
560 }
561
562 pushTemporaryCleanup(*this, M, E, Object);
563 return RefTempDst;
564 }
565
568 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
569
570 for (const auto &Ignored : CommaLHSs)
571 EmitIgnoredExpr(Ignored);
572
573 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
574 if (opaque->getType()->isRecordType()) {
575 assert(Adjustments.empty());
576 return EmitOpaqueValueLValue(opaque);
577 }
578 }
579
580 // Create and initialize the reference temporary.
581 RawAddress Alloca = Address::invalid();
582 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
583 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
584 Object.getPointer()->stripPointerCasts())) {
585 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
586 Object = Object.withElementType(TemporaryType);
587 // If the temporary is a global and has a constant initializer or is a
588 // constant temporary that we promoted to a global, we may have already
589 // initialized it.
590 if (!Var->hasInitializer()) {
591 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
593 if (RefType.getPointerAuth()) {
594 // Use the qualifier of the reference temporary to sign the pointer.
595 LValue LV = MakeRawAddrLValue(Object.getPointer(), RefType,
596 Object.getAlignment());
597 EmitScalarInit(E, M->getExtendingDecl(), LV, false);
598 } else {
599 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
600 }
601 }
602 } else {
603 switch (M->getStorageDuration()) {
604 case SD_Automatic:
605 if (EmitLifetimeStart(Alloca.getPointer())) {
607 Alloca);
608 }
609 break;
610
611 case SD_FullExpression: {
612 if (!ShouldEmitLifetimeMarkers)
613 break;
614
615 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
616 // marker. Instead, start the lifetime of a conditional temporary earlier
617 // so that it's unconditional. Don't do this with sanitizers which need
618 // more precise lifetime marks. However when inside an "await.suspend"
619 // block, we should always avoid conditional cleanup because it creates
620 // boolean marker that lives across await_suspend, which can destroy coro
621 // frame.
622 ConditionalEvaluation *OldConditional = nullptr;
623 CGBuilderTy::InsertPoint OldIP;
625 ((!SanOpts.has(SanitizerKind::HWAddress) &&
626 !SanOpts.has(SanitizerKind::Memory) &&
627 !SanOpts.has(SanitizerKind::MemtagStack) &&
628 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
629 inSuspendBlock())) {
630 OldConditional = OutermostConditional;
631 OutermostConditional = nullptr;
632
633 OldIP = Builder.saveIP();
634 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
635 Builder.restoreIP(CGBuilderTy::InsertPoint(
636 Block, llvm::BasicBlock::iterator(Block->back())));
637 }
638
639 if (EmitLifetimeStart(Alloca.getPointer())) {
641 }
642
643 if (OldConditional) {
644 OutermostConditional = OldConditional;
645 Builder.restoreIP(OldIP);
646 }
647 break;
648 }
649
650 default:
651 break;
652 }
653 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
654 }
655 pushTemporaryCleanup(*this, M, E, Object);
656
657 // Perform derived-to-base casts and/or field accesses, to get from the
658 // temporary object we created (and, potentially, for which we extended
659 // the lifetime) to the subobject we're binding the reference to.
660 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
661 switch (Adjustment.Kind) {
663 Object =
664 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
665 Adjustment.DerivedToBase.BasePath->path_begin(),
666 Adjustment.DerivedToBase.BasePath->path_end(),
667 /*NullCheckValue=*/ false, E->getExprLoc());
668 break;
669
672 LV = EmitLValueForField(LV, Adjustment.Field);
673 assert(LV.isSimple() &&
674 "materialized temporary field is not a simple lvalue");
675 Object = LV.getAddress();
676 break;
677 }
678
680 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
682 E, Object, Ptr, Adjustment.Ptr.MPT, /*IsInBounds=*/true);
683 break;
684 }
685 }
686 }
687
688 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
689}
690
691RValue
693 // Emit the expression as an lvalue.
694 LValue LV = EmitLValue(E);
695 assert(LV.isSimple());
696 llvm::Value *Value = LV.getPointer(*this);
697
699 // C++11 [dcl.ref]p5 (as amended by core issue 453):
700 // If a glvalue to which a reference is directly bound designates neither
701 // an existing object or function of an appropriate type nor a region of
702 // storage of suitable size and alignment to contain an object of the
703 // reference's type, the behavior is undefined.
704 QualType Ty = E->getType();
706 }
707
708 return RValue::get(Value);
709}
710
711
712/// getAccessedFieldNo - Given an encoded value and a result number, return the
713/// input field number being accessed.
715 const llvm::Constant *Elts) {
716 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
717 ->getZExtValue();
718}
719
720static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
721 llvm::Value *Ptr) {
722 llvm::Value *A0 =
723 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
724 llvm::Value *A1 =
725 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
726 return Builder.CreateXor(Acc, A1);
727}
728
733
736 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
737 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
740}
741
743 return SanOpts.has(SanitizerKind::Null) ||
744 SanOpts.has(SanitizerKind::Alignment) ||
745 SanOpts.has(SanitizerKind::ObjectSize) ||
746 SanOpts.has(SanitizerKind::Vptr);
747}
748
750 llvm::Value *Ptr, QualType Ty,
751 CharUnits Alignment,
752 SanitizerSet SkippedChecks,
753 llvm::Value *ArraySize) {
755 return;
756
757 // Don't check pointers outside the default address space. The null check
758 // isn't correct, the object-size check isn't supported by LLVM, and we can't
759 // communicate the addresses to the runtime handler for the vptr check.
760 if (Ptr->getType()->getPointerAddressSpace())
761 return;
762
763 // Don't check pointers to volatile data. The behavior here is implementation-
764 // defined.
765 if (Ty.isVolatileQualified())
766 return;
767
768 // Quickly determine whether we have a pointer to an alloca. It's possible
769 // to skip null checks, and some alignment checks, for these pointers. This
770 // can reduce compile-time significantly.
771 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
772
773 llvm::Value *IsNonNull = nullptr;
774 bool IsGuaranteedNonNull =
775 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
776
777 llvm::BasicBlock *Done = nullptr;
778 bool DoneViaNullSanitize = false;
779
780 {
781 auto CheckHandler = SanitizerHandler::TypeMismatch;
782 SanitizerDebugLocation SanScope(this,
783 {SanitizerKind::SO_Null,
784 SanitizerKind::SO_ObjectSize,
785 SanitizerKind::SO_Alignment},
786 CheckHandler);
787
789 Checks;
790
791 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
792 bool AllowNullPointers = isNullPointerAllowed(TCK);
793 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
794 !IsGuaranteedNonNull) {
795 // The glvalue must not be an empty glvalue.
796 IsNonNull = Builder.CreateIsNotNull(Ptr);
797
798 // The IR builder can constant-fold the null check if the pointer points
799 // to a constant.
800 IsGuaranteedNonNull = IsNonNull == True;
801
802 // Skip the null check if the pointer is known to be non-null.
803 if (!IsGuaranteedNonNull) {
804 if (AllowNullPointers) {
805 // When performing pointer casts, it's OK if the value is null.
806 // Skip the remaining checks in that case.
807 Done = createBasicBlock("null");
808 DoneViaNullSanitize = true;
809 llvm::BasicBlock *Rest = createBasicBlock("not.null");
810 Builder.CreateCondBr(IsNonNull, Rest, Done);
811 EmitBlock(Rest);
812 } else {
813 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
814 }
815 }
816 }
817
818 if (SanOpts.has(SanitizerKind::ObjectSize) &&
819 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
820 !Ty->isIncompleteType()) {
821 uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
822 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
823 if (ArraySize)
824 Size = Builder.CreateMul(Size, ArraySize);
825
826 // Degenerate case: new X[0] does not need an objectsize check.
827 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
828 if (!ConstantSize || !ConstantSize->isNullValue()) {
829 // The glvalue must refer to a large enough storage region.
830 // FIXME: If Address Sanitizer is enabled, insert dynamic
831 // instrumentation
832 // to check this.
833 // FIXME: Get object address space
834 llvm::Type *Tys[2] = {IntPtrTy, Int8PtrTy};
835 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
836 llvm::Value *Min = Builder.getFalse();
837 llvm::Value *NullIsUnknown = Builder.getFalse();
838 llvm::Value *Dynamic = Builder.getFalse();
839 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
840 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
841 Checks.push_back(
842 std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
843 }
844 }
845
846 llvm::MaybeAlign AlignVal;
847 llvm::Value *PtrAsInt = nullptr;
848
849 if (SanOpts.has(SanitizerKind::Alignment) &&
850 !SkippedChecks.has(SanitizerKind::Alignment)) {
851 AlignVal = Alignment.getAsMaybeAlign();
852 if (!Ty->isIncompleteType() && !AlignVal)
853 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
854 /*ForPointeeType=*/true)
855 .getAsMaybeAlign();
856
857 // The glvalue must be suitably aligned.
858 if (AlignVal && *AlignVal > llvm::Align(1) &&
859 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
860 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
861 llvm::Value *Align = Builder.CreateAnd(
862 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
863 llvm::Value *Aligned =
864 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
865 if (Aligned != True)
866 Checks.push_back(
867 std::make_pair(Aligned, SanitizerKind::SO_Alignment));
868 }
869 }
870
871 if (Checks.size() > 0) {
872 llvm::Constant *StaticData[] = {
874 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
875 llvm::ConstantInt::get(Int8Ty, TCK)};
876 EmitCheck(Checks, CheckHandler, StaticData, PtrAsInt ? PtrAsInt : Ptr);
877 }
878 }
879
880 // If possible, check that the vptr indicates that there is a subobject of
881 // type Ty at offset zero within this object.
882 //
883 // C++11 [basic.life]p5,6:
884 // [For storage which does not refer to an object within its lifetime]
885 // The program has undefined behavior if:
886 // -- the [pointer or glvalue] is used to access a non-static data member
887 // or call a non-static member function
888 if (SanOpts.has(SanitizerKind::Vptr) &&
889 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
890 SanitizerDebugLocation SanScope(this, {SanitizerKind::SO_Vptr},
891 SanitizerHandler::DynamicTypeCacheMiss);
892
893 // Ensure that the pointer is non-null before loading it. If there is no
894 // compile-time guarantee, reuse the run-time null check or emit a new one.
895 if (!IsGuaranteedNonNull) {
896 if (!IsNonNull)
897 IsNonNull = Builder.CreateIsNotNull(Ptr);
898 if (!Done)
899 Done = createBasicBlock("vptr.null");
900 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
901 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
902 EmitBlock(VptrNotNull);
903 }
904
905 // Compute a deterministic hash of the mangled name of the type.
906 SmallString<64> MangledName;
907 llvm::raw_svector_ostream Out(MangledName);
908 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
909 Out);
910
911 // Contained in NoSanitizeList based on the mangled type.
912 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
913 Out.str())) {
914 // Load the vptr, and mix it with TypeHash.
915 llvm::Value *TypeHash =
916 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
917
918 llvm::Type *VPtrTy = llvm::PointerType::get(getLLVMContext(), 0);
919 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
920 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
921 Ty->getAsCXXRecordDecl(),
923 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
924
925 llvm::Value *Hash =
926 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
927 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
928
929 // Look the hash up in our cache.
930 const int CacheSize = 128;
931 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
932 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
933 "__ubsan_vptr_type_cache");
934 llvm::Value *Slot = Builder.CreateAnd(Hash,
935 llvm::ConstantInt::get(IntPtrTy,
936 CacheSize-1));
937 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
938 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
939 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
941
942 // If the hash isn't in the cache, call a runtime handler to perform the
943 // hard work of checking whether the vptr is for an object of the right
944 // type. This will either fill in the cache and return, or produce a
945 // diagnostic.
946 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
947 llvm::Constant *StaticData[] = {
950 CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
951 llvm::ConstantInt::get(Int8Ty, TCK)
952 };
953 llvm::Value *DynamicData[] = { Ptr, Hash };
954 EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
955 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
956 DynamicData);
957 }
958 }
959
960 if (Done) {
961 SanitizerDebugLocation SanScope(
962 this,
963 {DoneViaNullSanitize ? SanitizerKind::SO_Null : SanitizerKind::SO_Vptr},
964 DoneViaNullSanitize ? SanitizerHandler::TypeMismatch
965 : SanitizerHandler::DynamicTypeCacheMiss);
966 Builder.CreateBr(Done);
967 EmitBlock(Done);
968 }
969}
970
972 QualType EltTy) {
974 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
975 if (!EltSize)
976 return nullptr;
977
978 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
979 if (!ArrayDeclRef)
980 return nullptr;
981
982 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
983 if (!ParamDecl)
984 return nullptr;
985
986 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
987 if (!POSAttr)
988 return nullptr;
989
990 // Don't load the size if it's a lower bound.
991 int POSType = POSAttr->getType();
992 if (POSType != 0 && POSType != 1)
993 return nullptr;
994
995 // Find the implicit size parameter.
996 auto PassedSizeIt = SizeArguments.find(ParamDecl);
997 if (PassedSizeIt == SizeArguments.end())
998 return nullptr;
999
1000 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
1001 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
1002 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
1003 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
1004 C.getSizeType(), E->getExprLoc());
1005 llvm::Value *SizeOfElement =
1006 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
1007 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
1008}
1009
1010/// If Base is known to point to the start of an array, return the length of
1011/// that array. Return 0 if the length cannot be determined.
1013 const Expr *Base,
1014 QualType &IndexedType,
1016 StrictFlexArraysLevel) {
1017 // For the vector indexing extension, the bound is the number of elements.
1018 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
1019 IndexedType = Base->getType();
1020 return CGF.Builder.getInt32(VT->getNumElements());
1021 }
1022
1023 Base = Base->IgnoreParens();
1024
1025 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1026 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
1027 !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
1028 StrictFlexArraysLevel)) {
1029 CodeGenFunction::SanitizerScope SanScope(&CGF);
1030
1031 IndexedType = CE->getSubExpr()->getType();
1032 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
1033 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
1034 return CGF.Builder.getInt(CAT->getSize());
1035
1036 if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
1037 return CGF.getVLASize(VAT).NumElts;
1038 // Ignore pass_object_size here. It's not applicable on decayed pointers.
1039 }
1040 }
1041
1042 CodeGenFunction::SanitizerScope SanScope(&CGF);
1043
1044 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
1045 if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
1046 IndexedType = Base->getType();
1047 return POS;
1048 }
1049
1050 return nullptr;
1051}
1052
1053namespace {
1054
1055/// \p StructAccessBase returns the base \p Expr of a field access. It returns
1056/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
1057///
1058/// p in p-> a.b.c
1059///
1060/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
1061/// looking for:
1062///
1063/// struct s {
1064/// struct s *ptr;
1065/// int count;
1066/// char array[] __attribute__((counted_by(count)));
1067/// };
1068///
1069/// If we have an expression like \p p->ptr->array[index], we want the
1070/// \p MemberExpr for \p p->ptr instead of \p p.
1071class StructAccessBase
1072 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
1073 const RecordDecl *ExpectedRD;
1074
1075 bool IsExpectedRecordDecl(const Expr *E) const {
1076 QualType Ty = E->getType();
1077 if (Ty->isPointerType())
1078 Ty = Ty->getPointeeType();
1079 return ExpectedRD == Ty->getAsRecordDecl();
1080 }
1081
1082public:
1083 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
1084
1085 //===--------------------------------------------------------------------===//
1086 // Visitor Methods
1087 //===--------------------------------------------------------------------===//
1088
1089 // NOTE: If we build C++ support for counted_by, then we'll have to handle
1090 // horrors like this:
1091 //
1092 // struct S {
1093 // int x, y;
1094 // int blah[] __attribute__((counted_by(x)));
1095 // } s;
1096 //
1097 // int foo(int index, int val) {
1098 // int (S::*IHatePMDs)[] = &S::blah;
1099 // (s.*IHatePMDs)[index] = val;
1100 // }
1101
1102 const Expr *Visit(const Expr *E) {
1103 return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
1104 }
1105
1106 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1107
1108 // These are the types we expect to return (in order of most to least
1109 // likely):
1110 //
1111 // 1. DeclRefExpr - This is the expression for the base of the structure.
1112 // It's exactly what we want to build an access to the \p counted_by
1113 // field.
1114 // 2. MemberExpr - This is the expression that has the same \p RecordDecl
1115 // as the flexble array member's lexical enclosing \p RecordDecl. This
1116 // allows us to catch things like: "p->p->array"
1117 // 3. CompoundLiteralExpr - This is for people who create something
1118 // heretical like (struct foo has a flexible array member):
1119 //
1120 // (struct foo){ 1, 2 }.blah[idx];
1121 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1122 return IsExpectedRecordDecl(E) ? E : nullptr;
1123 }
1124 const Expr *VisitMemberExpr(const MemberExpr *E) {
1125 if (IsExpectedRecordDecl(E) && E->isArrow())
1126 return E;
1127 const Expr *Res = Visit(E->getBase());
1128 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1129 }
1130 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1131 return IsExpectedRecordDecl(E) ? E : nullptr;
1132 }
1133 const Expr *VisitCallExpr(const CallExpr *E) {
1134 return IsExpectedRecordDecl(E) ? E : nullptr;
1135 }
1136
1137 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1138 if (IsExpectedRecordDecl(E))
1139 return E;
1140 return Visit(E->getBase());
1141 }
1142 const Expr *VisitCastExpr(const CastExpr *E) {
1143 if (E->getCastKind() == CK_LValueToRValue)
1144 return IsExpectedRecordDecl(E) ? E : nullptr;
1145 return Visit(E->getSubExpr());
1146 }
1147 const Expr *VisitParenExpr(const ParenExpr *E) {
1148 return Visit(E->getSubExpr());
1149 }
1150 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1151 return Visit(E->getSubExpr());
1152 }
1153 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1154 return Visit(E->getSubExpr());
1155 }
1156};
1157
1158} // end anonymous namespace
1159
1161
1163 const FieldDecl *Field,
1164 RecIndicesTy &Indices) {
1165 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1166 int64_t FieldNo = -1;
1167 for (const FieldDecl *FD : RD->fields()) {
1168 if (!Layout.containsFieldDecl(FD))
1169 // This could happen if the field has a struct type that's empty. I don't
1170 // know why either.
1171 continue;
1172
1173 FieldNo = Layout.getLLVMFieldNo(FD);
1174 if (FD == Field) {
1175 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1176 return true;
1177 }
1178
1179 QualType Ty = FD->getType();
1180 if (Ty->isRecordType()) {
1181 if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
1182 if (RD->isUnion())
1183 FieldNo = 0;
1184 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1185 return true;
1186 }
1187 }
1188 }
1189
1190 return false;
1191}
1192
1194 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1195 // Find the record containing the count field. Walk up through anonymous
1196 // structs/unions (which are transparent in C) but stop at named records.
1197 // Using getOuterLexicalRecordContext() here would be wrong because it walks
1198 // past named nested structs to the outermost record, causing a crash when a
1199 // struct with a counted_by FAM is defined nested inside another struct.
1200 const RecordDecl *RD = CountDecl->getParent();
1201 while (RD->isAnonymousStructOrUnion()) {
1202 const auto *Parent = dyn_cast<RecordDecl>(RD->getLexicalParent());
1203 if (!Parent)
1204 break;
1205 RD = Parent;
1206 }
1207
1208 // Find the base struct expr (i.e. p in p->a.b.c.d).
1209 const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1210 if (!StructBase || StructBase->HasSideEffects(getContext()))
1211 return nullptr;
1212
1213 llvm::Value *Res = nullptr;
1214 if (StructBase->getType()->isPointerType()) {
1215 LValueBaseInfo BaseInfo;
1216 TBAAAccessInfo TBAAInfo;
1217 Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1218 Res = Addr.emitRawPointer(*this);
1219 } else if (StructBase->isLValue()) {
1220 LValue LV = EmitLValue(StructBase);
1221 Address Addr = LV.getAddress();
1222 Res = Addr.emitRawPointer(*this);
1223 } else {
1224 return nullptr;
1225 }
1226
1227 RecIndicesTy Indices;
1228 getGEPIndicesToField(*this, RD, CountDecl, Indices);
1229 if (Indices.empty())
1230 return nullptr;
1231
1232 Indices.push_back(Builder.getInt32(0));
1233 CanQualType T = CGM.getContext().getCanonicalTagType(RD);
1234 return Builder.CreateInBoundsGEP(ConvertType(T), Res,
1235 RecIndicesTy(llvm::reverse(Indices)),
1236 "counted_by.gep");
1237}
1238
1239/// This method is typically called in contexts where we can't generate
1240/// side-effects, like in __builtin_dynamic_object_size. When finding
1241/// expressions, only choose those that have either already been emitted or can
1242/// be loaded without side-effects.
1243///
1244/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1245/// within the top-level struct.
1246/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1248 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1249 if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
1250 return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
1251 getIntAlign(), "counted_by.load");
1252 return nullptr;
1253}
1254
1256 const Expr *ArrayExprBase,
1257 llvm::Value *IndexVal, QualType IndexType,
1258 bool Accessed) {
1259 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1260 "should not be called unless adding bounds checks");
1261 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1262 getLangOpts().getStrictFlexArraysLevel();
1263 QualType ArrayExprBaseType;
1264 llvm::Value *BoundsVal = getArrayIndexingBound(
1265 *this, ArrayExprBase, ArrayExprBaseType, StrictFlexArraysLevel);
1266
1267 EmitBoundsCheckImpl(ArrayExpr, ArrayExprBaseType, IndexVal, IndexType,
1268 BoundsVal, getContext().getSizeType(), Accessed);
1269}
1270
1272 QualType ArrayBaseType,
1273 llvm::Value *IndexVal,
1274 QualType IndexType,
1275 llvm::Value *BoundsVal,
1276 QualType BoundsType, bool Accessed) {
1277 if (!BoundsVal)
1278 return;
1279
1280 auto CheckKind = SanitizerKind::SO_ArrayBounds;
1281 auto CheckHandler = SanitizerHandler::OutOfBounds;
1282 SanitizerDebugLocation SanScope(this, {CheckKind}, CheckHandler);
1283
1284 // All hail the C implicit type conversion rules!!!
1285 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1286 bool BoundsSigned = BoundsType->isSignedIntegerOrEnumerationType();
1287
1288 const ASTContext &Ctx = getContext();
1289 llvm::Type *Ty = ConvertType(
1290 Ctx.getTypeSize(IndexType) >= Ctx.getTypeSize(BoundsType) ? IndexType
1291 : BoundsType);
1292
1293 llvm::Value *IndexInst = Builder.CreateIntCast(IndexVal, Ty, IndexSigned);
1294 llvm::Value *BoundsInst = Builder.CreateIntCast(BoundsVal, Ty, false);
1295
1296 llvm::Constant *StaticData[] = {
1297 EmitCheckSourceLocation(ArrayExpr->getExprLoc()),
1298 EmitCheckTypeDescriptor(ArrayBaseType),
1299 EmitCheckTypeDescriptor(IndexType),
1300 };
1301
1302 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexInst, BoundsInst)
1303 : Builder.CreateICmpULE(IndexInst, BoundsInst);
1304
1305 if (BoundsSigned) {
1306 // Don't allow a negative bounds.
1307 llvm::Value *Cmp = Builder.CreateICmpSGT(
1308 BoundsVal, llvm::ConstantInt::get(BoundsVal->getType(), 0));
1309 Check = Builder.CreateAnd(Cmp, Check);
1310 }
1311
1312 EmitCheck(std::make_pair(Check, CheckKind), CheckHandler, StaticData,
1313 IndexInst);
1314}
1315
1317 auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, getContext());
1318 if (!ATMD)
1319 return nullptr;
1320
1321 llvm::MDBuilder MDB(getLLVMContext());
1322 auto *TypeNameMD = MDB.createString(ATMD->TypeName);
1323 auto *ContainsPtrC = Builder.getInt1(ATMD->ContainsPointer);
1324 auto *ContainsPtrMD = MDB.createConstant(ContainsPtrC);
1325
1326 // Format: !{<type-name>, <contains-pointer>}
1327 return llvm::MDNode::get(CGM.getLLVMContext(), {TypeNameMD, ContainsPtrMD});
1328}
1329
1330void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, QualType AllocType) {
1331 assert(SanOpts.has(SanitizerKind::AllocToken) &&
1332 "Only needed with -fsanitize=alloc-token");
1333 CB->setMetadata(llvm::LLVMContext::MD_alloc_token,
1334 buildAllocToken(AllocType));
1335}
1336
1339 if (!AllocType.isNull())
1340 return buildAllocToken(AllocType);
1341 return nullptr;
1342}
1343
1344void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, const CallExpr *E) {
1345 assert(SanOpts.has(SanitizerKind::AllocToken) &&
1346 "Only needed with -fsanitize=alloc-token");
1347 if (llvm::MDNode *MDN = buildAllocToken(E))
1348 CB->setMetadata(llvm::LLVMContext::MD_alloc_token, MDN);
1349}
1350
1353 bool isInc, bool isPre) {
1354 ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1355
1356 llvm::Value *NextVal;
1357 if (isa<llvm::IntegerType>(InVal.first->getType())) {
1358 uint64_t AmountVal = isInc ? 1 : -1;
1359 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1360
1361 // Add the inc/dec to the real part.
1362 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1363 } else {
1364 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1365 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1366 if (!isInc)
1367 FVal.changeSign();
1368 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1369
1370 // Add the inc/dec to the real part.
1371 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1372 }
1373
1374 ComplexPairTy IncVal(NextVal, InVal.second);
1375
1376 // Store the updated result through the lvalue.
1377 EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1378 if (getLangOpts().OpenMP)
1379 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1380 E->getSubExpr());
1381
1382 // If this is a postinc, return the value read from memory, otherwise use the
1383 // updated value.
1384 return isPre ? IncVal : InVal;
1385}
1386
1388 CodeGenFunction *CGF) {
1389 // Bind VLAs in the cast type.
1390 if (CGF && E->getType()->isVariablyModifiedType())
1392
1393 if (CGDebugInfo *DI = getModuleDebugInfo())
1394 DI->EmitExplicitCastType(E->getType());
1395}
1396
1397//===----------------------------------------------------------------------===//
1398// LValue Expression Emission
1399//===----------------------------------------------------------------------===//
1400
1401static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx,
1402 CharUnits eltSize) {
1403 // If we have a constant index, we can use the exact offset of the
1404 // element we're accessing.
1405 if (auto *constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
1406 CharUnits offset = constantIdx->getZExtValue() * eltSize;
1407 return arrayAlign.alignmentAtOffset(offset);
1408 }
1409
1410 // Otherwise, use the worst-case alignment for any element.
1411 return arrayAlign.alignmentOfArrayElement(eltSize);
1412}
1413
1414/// Emit pointer + index arithmetic.
1416 const BinaryOperator *BO,
1417 LValueBaseInfo *BaseInfo,
1418 TBAAAccessInfo *TBAAInfo,
1419 KnownNonNull_t IsKnownNonNull) {
1420 assert(BO->isAdditiveOp() && "Expect an addition or subtraction.");
1421 Expr *pointerOperand = BO->getLHS();
1422 Expr *indexOperand = BO->getRHS();
1423 bool isSubtraction = BO->getOpcode() == BO_Sub;
1424
1425 Address BaseAddr = Address::invalid();
1426 llvm::Value *index = nullptr;
1427 // In a subtraction, the LHS is always the pointer.
1428 // Note: do not change the evaluation order.
1429 if (!isSubtraction && !pointerOperand->getType()->isAnyPointerType()) {
1430 std::swap(pointerOperand, indexOperand);
1431 index = CGF.EmitScalarExpr(indexOperand);
1432 BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
1434 } else {
1435 BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
1437 index = CGF.EmitScalarExpr(indexOperand);
1438 }
1439
1440 llvm::Value *pointer = BaseAddr.getBasePointer();
1441 llvm::Value *Res = CGF.EmitPointerArithmetic(
1442 BO, pointerOperand, pointer, indexOperand, index, isSubtraction);
1443 QualType PointeeTy = BO->getType()->getPointeeType();
1444 CharUnits Align =
1446 CGF.getContext().getTypeSizeInChars(PointeeTy));
1447 return Address(Res, CGF.ConvertTypeForMem(PointeeTy), Align,
1449 /*Offset=*/nullptr, IsKnownNonNull);
1450}
1451
1453 TBAAAccessInfo *TBAAInfo,
1454 KnownNonNull_t IsKnownNonNull,
1455 CodeGenFunction &CGF) {
1456 // We allow this with ObjC object pointers because of fragile ABIs.
1457 assert(E->getType()->isPointerType() ||
1459 E = E->IgnoreParens();
1460
1461 // Casts:
1462 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1463 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1464 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1465
1466 switch (CE->getCastKind()) {
1467 // Non-converting casts (but not C's implicit conversion from void*).
1468 case CK_BitCast:
1469 case CK_NoOp:
1470 case CK_AddressSpaceConversion:
1471 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1472 if (PtrTy->getPointeeType()->isVoidType())
1473 break;
1474
1475 LValueBaseInfo InnerBaseInfo;
1476 TBAAAccessInfo InnerTBAAInfo;
1478 CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1479 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1480 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1481
1482 if (isa<ExplicitCastExpr>(CE)) {
1483 LValueBaseInfo TargetTypeBaseInfo;
1484 TBAAAccessInfo TargetTypeTBAAInfo;
1486 E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1487 if (TBAAInfo)
1488 *TBAAInfo =
1489 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1490 // If the source l-value is opaque, honor the alignment of the
1491 // casted-to type.
1492 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1493 if (BaseInfo)
1494 BaseInfo->mergeForCast(TargetTypeBaseInfo);
1495 Addr.setAlignment(Align);
1496 }
1497 }
1498
1499 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1500 CE->getCastKind() == CK_BitCast) {
1501 if (auto PT = E->getType()->getAs<PointerType>())
1502 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1503 /*MayBeNull=*/true,
1505 CE->getBeginLoc());
1506 }
1507
1508 llvm::Type *ElemTy =
1510 Addr = Addr.withElementType(ElemTy);
1511 if (CE->getCastKind() == CK_AddressSpaceConversion)
1513 Addr, CGF.ConvertType(E->getType()), ElemTy);
1514
1515 return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
1516 CE->getType());
1517 }
1518 break;
1519
1520 // Array-to-pointer decay.
1521 case CK_ArrayToPointerDecay:
1522 return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1523
1524 // Derived-to-base conversions.
1525 case CK_UncheckedDerivedToBase:
1526 case CK_DerivedToBase: {
1527 // TODO: Support accesses to members of base classes in TBAA. For now, we
1528 // conservatively pretend that the complete object is of the base class
1529 // type.
1530 if (TBAAInfo)
1531 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1533 CE->getSubExpr(), BaseInfo, nullptr,
1534 (KnownNonNull_t)(IsKnownNonNull ||
1535 CE->getCastKind() == CK_UncheckedDerivedToBase));
1536 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1537 return CGF.GetAddressOfBaseClass(
1538 Addr, Derived, CE->path_begin(), CE->path_end(),
1539 CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1540 }
1541
1542 // TODO: Is there any reason to treat base-to-derived conversions
1543 // specially?
1544 default:
1545 break;
1546 }
1547 }
1548
1549 // Unary &.
1550 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1551 if (UO->getOpcode() == UO_AddrOf) {
1552 LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1553 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1554 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1555 return LV.getAddress();
1556 }
1557 }
1558
1559 // std::addressof and variants.
1560 if (auto *Call = dyn_cast<CallExpr>(E)) {
1561 switch (Call->getBuiltinCallee()) {
1562 default:
1563 break;
1564 case Builtin::BIaddressof:
1565 case Builtin::BI__addressof:
1566 case Builtin::BI__builtin_addressof: {
1567 LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1568 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1569 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1570 return LV.getAddress();
1571 }
1572 }
1573 }
1574
1575 // Pointer arithmetic: pointer +/- index.
1576 if (auto *BO = dyn_cast<BinaryOperator>(E)) {
1577 if (BO->isAdditiveOp())
1578 return emitPointerArithmetic(CGF, BO, BaseInfo, TBAAInfo, IsKnownNonNull);
1579 }
1580
1581 // TODO: conditional operators, comma.
1582
1583 // Otherwise, use the alignment of the type.
1586 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1587}
1588
1589/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1590/// derive a more accurate bound on the alignment of the pointer.
1592 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1593 KnownNonNull_t IsKnownNonNull) {
1594 Address Addr =
1595 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1596 if (IsKnownNonNull && !Addr.isKnownNonNull())
1597 Addr.setKnownNonNull();
1598 return Addr;
1599}
1600
1602 llvm::Value *V = RV.getScalarVal();
1603 if (auto MPT = T->getAs<MemberPointerType>())
1604 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1605 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1606}
1607
1609 if (Ty->isVoidType())
1610 return RValue::get(nullptr);
1611
1612 switch (getEvaluationKind(Ty)) {
1613 case TEK_Complex: {
1614 llvm::Type *EltTy =
1616 llvm::Value *U = llvm::UndefValue::get(EltTy);
1617 return RValue::getComplex(std::make_pair(U, U));
1618 }
1619
1620 // If this is a use of an undefined aggregate type, the aggregate must have an
1621 // identifiable address. Just because the contents of the value are undefined
1622 // doesn't mean that the address can't be taken and compared.
1623 case TEK_Aggregate: {
1624 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1625 return RValue::getAggregate(DestPtr);
1626 }
1627
1628 case TEK_Scalar:
1629 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1630 }
1631 llvm_unreachable("bad evaluation kind");
1632}
1633
1635 const char *Name) {
1636 ErrorUnsupported(E, Name);
1637 return GetUndefRValue(E->getType());
1638}
1639
1641 const char *Name) {
1642 ErrorUnsupported(E, Name);
1643 llvm::Type *ElTy = ConvertType(E->getType());
1644 llvm::Type *Ty = DefaultPtrTy;
1645 return MakeAddrLValue(
1646 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1647}
1648
1650 const Expr *Base = Obj;
1651 while (!isa<CXXThisExpr>(Base)) {
1652 // The result of a dynamic_cast can be null.
1654 return false;
1655
1656 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1657 Base = CE->getSubExpr();
1658 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1659 Base = PE->getSubExpr();
1660 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1661 if (UO->getOpcode() == UO_Extension)
1662 Base = UO->getSubExpr();
1663 else
1664 return false;
1665 } else {
1666 return false;
1667 }
1668 }
1669 return true;
1670}
1671
1673 LValue LV;
1674 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1675 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1676 else
1677 LV = EmitLValue(E);
1678 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1679 SanitizerSet SkippedChecks;
1680 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1681 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1682 if (IsBaseCXXThis)
1683 SkippedChecks.set(SanitizerKind::Alignment, true);
1684 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1685 SkippedChecks.set(SanitizerKind::Null, true);
1686 }
1687 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1688 }
1689 return LV;
1690}
1691
1692/// EmitLValue - Emit code to compute a designator that specifies the location
1693/// of the expression.
1694///
1695/// This can return one of two things: a simple address or a bitfield reference.
1696/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1697/// an LLVM pointer type.
1698///
1699/// If this returns a bitfield reference, nothing about the pointee type of the
1700/// LLVM value is known: For example, it may not be a pointer to an integer.
1701///
1702/// If this returns a normal address, and if the lvalue's C type is fixed size,
1703/// this method guarantees that the returned pointer type will point to an LLVM
1704/// type of the same size of the lvalue's type. If the lvalue has a variable
1705/// length type, this is not possible.
1706///
1708 KnownNonNull_t IsKnownNonNull) {
1709 // Running with sufficient stack space to avoid deeply nested expressions
1710 // cause a stack overflow.
1711 LValue LV;
1712 CGM.runWithSufficientStackSpace(
1713 E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });
1714
1715 if (IsKnownNonNull && !LV.isKnownNonNull())
1716 LV.setKnownNonNull();
1717 return LV;
1718}
1719
1720LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1721 KnownNonNull_t IsKnownNonNull) {
1722 ApplyDebugLocation DL(*this, E);
1723 switch (E->getStmtClass()) {
1724 default: return EmitUnsupportedLValue(E, "l-value expression");
1725
1726 case Expr::ObjCPropertyRefExprClass:
1727 llvm_unreachable("cannot emit a property reference directly");
1728
1729 case Expr::ObjCSelectorExprClass:
1731 case Expr::ObjCIsaExprClass:
1733 case Expr::BinaryOperatorClass:
1735 case Expr::CompoundAssignOperatorClass: {
1736 QualType Ty = E->getType();
1737 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1738 Ty = AT->getValueType();
1739 if (!Ty->isAnyComplexType())
1742 }
1743 case Expr::CallExprClass:
1744 case Expr::CXXMemberCallExprClass:
1745 case Expr::CXXOperatorCallExprClass:
1746 case Expr::UserDefinedLiteralClass:
1748 case Expr::CXXRewrittenBinaryOperatorClass:
1749 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1750 IsKnownNonNull);
1751 case Expr::VAArgExprClass:
1753 case Expr::DeclRefExprClass:
1755 case Expr::ConstantExprClass: {
1756 const ConstantExpr *CE = cast<ConstantExpr>(E);
1757 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE))
1759 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1760 }
1761 case Expr::ParenExprClass:
1762 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1763 case Expr::GenericSelectionExprClass:
1764 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1765 IsKnownNonNull);
1766 case Expr::PredefinedExprClass:
1768 case Expr::StringLiteralClass:
1770 case Expr::ObjCEncodeExprClass:
1772 case Expr::PseudoObjectExprClass:
1774 case Expr::InitListExprClass:
1776 case Expr::CXXTemporaryObjectExprClass:
1777 case Expr::CXXConstructExprClass:
1779 case Expr::CXXBindTemporaryExprClass:
1781 case Expr::CXXUuidofExprClass:
1783 case Expr::LambdaExprClass:
1784 return EmitAggExprToLValue(E);
1785
1786 case Expr::ExprWithCleanupsClass: {
1787 const auto *cleanups = cast<ExprWithCleanups>(E);
1788 RunCleanupsScope Scope(*this);
1789 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1790 if (LV.isSimple()) {
1791 // Defend against branches out of gnu statement expressions surrounded by
1792 // cleanups.
1793 Address Addr = LV.getAddress();
1794 llvm::Value *V = Addr.getBasePointer();
1795 Scope.ForceCleanup({&V});
1796 Addr.replaceBasePointer(V);
1797 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1798 LV.getBaseInfo(), LV.getTBAAInfo());
1799 }
1800 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1801 // bitfield lvalue or some other non-simple lvalue?
1802 return LV;
1803 }
1804
1805 case Expr::CXXDefaultArgExprClass: {
1806 auto *DAE = cast<CXXDefaultArgExpr>(E);
1807 CXXDefaultArgExprScope Scope(*this, DAE);
1808 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1809 }
1810 case Expr::CXXDefaultInitExprClass: {
1811 auto *DIE = cast<CXXDefaultInitExpr>(E);
1812 CXXDefaultInitExprScope Scope(*this, DIE);
1813 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1814 }
1815 case Expr::CXXTypeidExprClass:
1817
1818 case Expr::ObjCMessageExprClass:
1820 case Expr::ObjCIvarRefExprClass:
1822 case Expr::StmtExprClass:
1824 case Expr::UnaryOperatorClass:
1826 case Expr::ArraySubscriptExprClass:
1828 case Expr::MatrixSingleSubscriptExprClass:
1830 case Expr::MatrixSubscriptExprClass:
1832 case Expr::ArraySectionExprClass:
1834 case Expr::ExtVectorElementExprClass:
1836 case Expr::MatrixElementExprClass:
1838 case Expr::CXXThisExprClass:
1840 case Expr::MemberExprClass:
1842 case Expr::CompoundLiteralExprClass:
1844 case Expr::ConditionalOperatorClass:
1846 case Expr::BinaryConditionalOperatorClass:
1848 case Expr::ChooseExprClass:
1849 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1850 case Expr::OpaqueValueExprClass:
1852 case Expr::SubstNonTypeTemplateParmExprClass:
1853 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1854 IsKnownNonNull);
1855 case Expr::ImplicitCastExprClass:
1856 case Expr::CStyleCastExprClass:
1857 case Expr::CXXFunctionalCastExprClass:
1858 case Expr::CXXStaticCastExprClass:
1859 case Expr::CXXDynamicCastExprClass:
1860 case Expr::CXXReinterpretCastExprClass:
1861 case Expr::CXXConstCastExprClass:
1862 case Expr::CXXAddrspaceCastExprClass:
1863 case Expr::ObjCBridgedCastExprClass:
1864 return EmitCastLValue(cast<CastExpr>(E));
1865
1866 case Expr::MaterializeTemporaryExprClass:
1868
1869 case Expr::CoawaitExprClass:
1871 case Expr::CoyieldExprClass:
1873 case Expr::PackIndexingExprClass:
1874 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1875 case Expr::HLSLOutArgExprClass:
1876 llvm_unreachable("cannot emit a HLSL out argument directly");
1877 }
1878}
1879
1880/// Given an object of the given canonical type, can we safely copy a
1881/// value out of it based on its initializer?
1883 assert(type.isCanonical());
1884 assert(!type->isReferenceType());
1885
1886 // Must be const-qualified but non-volatile.
1887 Qualifiers qs = type.getLocalQualifiers();
1888 if (!qs.hasConst() || qs.hasVolatile()) return false;
1889
1890 // Otherwise, all object types satisfy this except C++ classes with
1891 // mutable subobjects or non-trivial copy/destroy behavior.
1892 if (const auto *RT = dyn_cast<RecordType>(type))
1893 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
1894 RD = RD->getDefinitionOrSelf();
1895 if (RD->hasMutableFields() || !RD->isTrivial())
1896 return false;
1897 }
1898
1899 return true;
1900}
1901
1902/// Can we constant-emit a load of a reference to a variable of the
1903/// given type? This is different from predicates like
1904/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1905/// in situations that don't necessarily satisfy the language's rules
1906/// for this (e.g. C++'s ODR-use rules). For example, we want to able
1907/// to do this with const float variables even if those variables
1908/// aren't marked 'constexpr'.
1916 type = type.getCanonicalType();
1917 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1918 if (isConstantEmittableObjectType(ref->getPointeeType()))
1920 return CEK_AsReferenceOnly;
1921 }
1923 return CEK_AsValueOnly;
1924 return CEK_None;
1925}
1926
1927/// Try to emit a reference to the given value without producing it as
1928/// an l-value. This is just an optimization, but it avoids us needing
1929/// to emit global copies of variables if they're named without triggering
1930/// a formal use in a context where we can't emit a direct reference to them,
1931/// for instance if a block or lambda or a member of a local class uses a
1932/// const int variable or constexpr variable from an enclosing function.
1935 const ValueDecl *Value = RefExpr->getDecl();
1936
1937 // The value needs to be an enum constant or a constant variable.
1939 if (isa<ParmVarDecl>(Value)) {
1940 CEK = CEK_None;
1941 } else if (const auto *var = dyn_cast<VarDecl>(Value)) {
1942 CEK = checkVarTypeForConstantEmission(var->getType());
1943 } else if (isa<EnumConstantDecl>(Value)) {
1944 CEK = CEK_AsValueOnly;
1945 } else {
1946 CEK = CEK_None;
1947 }
1948 if (CEK == CEK_None) return ConstantEmission();
1949
1950 Expr::EvalResult result;
1951 bool resultIsReference;
1952 QualType resultType;
1953
1954 // It's best to evaluate all the way as an r-value if that's permitted.
1955 if (CEK != CEK_AsReferenceOnly &&
1956 RefExpr->EvaluateAsRValue(result, getContext())) {
1957 resultIsReference = false;
1958 resultType = RefExpr->getType().getUnqualifiedType();
1959
1960 // Otherwise, try to evaluate as an l-value.
1961 } else if (CEK != CEK_AsValueOnly &&
1962 RefExpr->EvaluateAsLValue(result, getContext())) {
1963 resultIsReference = true;
1964 resultType = Value->getType();
1965
1966 // Failure.
1967 } else {
1968 return ConstantEmission();
1969 }
1970
1971 // In any case, if the initializer has side-effects, abandon ship.
1972 if (result.HasSideEffects)
1973 return ConstantEmission();
1974
1975 // In CUDA/HIP device compilation, a lambda may capture a reference variable
1976 // referencing a global host variable by copy. In this case the lambda should
1977 // make a copy of the value of the global host variable. The DRE of the
1978 // captured reference variable cannot be emitted as load from the host
1979 // global variable as compile time constant, since the host variable is not
1980 // accessible on device. The DRE of the captured reference variable has to be
1981 // loaded from captures.
1982 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1984 auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1985 if (isLambdaMethod(MD) && MD->getOverloadedOperator() == OO_Call) {
1986 const APValue::LValueBase &base = result.Val.getLValueBase();
1987 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1988 if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1989 if (!VD->hasAttr<CUDADeviceAttr>()) {
1990 return ConstantEmission();
1991 }
1992 }
1993 }
1994 }
1995 }
1996
1997 // Emit as a constant.
1998 llvm::Constant *C = ConstantEmitter(*this).emitAbstract(
1999 RefExpr->getLocation(), result.Val, resultType);
2000
2001 // Make sure we emit a debug reference to the global variable.
2002 // This should probably fire even for
2003 if (isa<VarDecl>(Value)) {
2004 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(Value)))
2005 EmitDeclRefExprDbgValue(RefExpr, result.Val);
2006 } else {
2008 EmitDeclRefExprDbgValue(RefExpr, result.Val);
2009 }
2010
2011 // If we emitted a reference constant, we need to dereference that.
2012 if (resultIsReference)
2014
2016}
2017
2019 const MemberExpr *ME) {
2020 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
2021 // Try to emit static variable member expressions as DREs.
2022 return DeclRefExpr::Create(
2024 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
2025 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
2026 }
2027 return nullptr;
2028}
2029
2033 return tryEmitAsConstant(DRE);
2034 return ConstantEmission();
2035}
2036
2038 const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
2039 assert(Constant && "not a constant");
2040 if (Constant.isReference())
2041 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
2042 E->getExprLoc())
2043 .getScalarVal();
2044 return Constant.getValue();
2045}
2046
2048 SourceLocation Loc) {
2049 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
2050 lvalue.getType(), Loc, lvalue.getBaseInfo(),
2051 lvalue.getTBAAInfo(), lvalue.isNontemporal());
2052}
2053
2055 llvm::APInt &Min, llvm::APInt &End,
2056 bool StrictEnums, bool IsBool) {
2057 const auto *ED = Ty->getAsEnumDecl();
2058 bool IsRegularCPlusPlusEnum =
2059 CGF.getLangOpts().CPlusPlus && StrictEnums && ED && !ED->isFixed();
2060 if (!IsBool && !IsRegularCPlusPlusEnum)
2061 return false;
2062
2063 if (IsBool) {
2064 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
2065 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
2066 } else {
2067 ED->getValueRange(End, Min);
2068 }
2069 return true;
2070}
2071
2072llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
2073 llvm::APInt Min, End;
2074 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
2075 Ty->hasBooleanRepresentation() && !Ty->isVectorType()))
2076 return nullptr;
2077
2078 llvm::MDBuilder MDHelper(getLLVMContext());
2079 return MDHelper.createRange(Min, End);
2080}
2081
2083 SourceLocation Loc) {
2084 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2085 // In order to prevent the optimizer from throwing away the check, don't
2086 // attach range metadata to the load.
2087 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2088 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2089 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2090 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2091 llvm::MDNode::get(CGM.getLLVMContext(), {}));
2092 }
2093 }
2094}
2095
2097 SourceLocation Loc) {
2098 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
2099 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
2100 if (!HasBoolCheck && !HasEnumCheck)
2101 return false;
2102
2103 bool IsBool = (Ty->hasBooleanRepresentation() && !Ty->isVectorType()) ||
2104 NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
2105 bool NeedsBoolCheck = HasBoolCheck && IsBool;
2106 bool NeedsEnumCheck = HasEnumCheck && Ty->isEnumeralType();
2107 if (!NeedsBoolCheck && !NeedsEnumCheck)
2108 return false;
2109
2110 // Single-bit booleans don't need to be checked. Special-case this to avoid
2111 // a bit width mismatch when handling bitfield values. This is handled by
2112 // EmitFromMemory for the non-bitfield case.
2113 if (IsBool &&
2114 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
2115 return false;
2116
2117 if (NeedsEnumCheck &&
2118 getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
2119 return false;
2120
2121 llvm::APInt Min, End;
2122 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
2123 return true;
2124
2126 NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;
2127
2128 auto &Ctx = getLLVMContext();
2129 auto CheckHandler = SanitizerHandler::LoadInvalidValue;
2130 SanitizerDebugLocation SanScope(this, {Kind}, CheckHandler);
2131 llvm::Value *Check;
2132 --End;
2133 if (!Min) {
2134 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
2135 } else {
2136 llvm::Value *Upper =
2137 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
2138 llvm::Value *Lower =
2139 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
2140 Check = Builder.CreateAnd(Upper, Lower);
2141 }
2142 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
2144 EmitCheck(std::make_pair(Check, Kind), CheckHandler, StaticArgs, Value);
2145 return true;
2146}
2147
2149 QualType Ty,
2150 SourceLocation Loc,
2151 LValueBaseInfo BaseInfo,
2152 TBAAAccessInfo TBAAInfo,
2153 bool isNontemporal) {
2154 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2155 if (GV->isThreadLocal())
2156 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2158
2159 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2160 // Boolean vectors use `iN` as storage type.
2161 if (ClangVecTy->isPackedVectorBoolType(getContext())) {
2162 llvm::Type *ValTy = ConvertType(Ty);
2163 unsigned ValNumElems =
2164 cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2165 // Load the `iP` storage object (P is the padded vector size).
2166 auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
2167 const auto *RawIntTy = RawIntV->getType();
2168 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
2169 // Bitcast iP --> <P x i1>.
2170 auto *PaddedVecTy = llvm::FixedVectorType::get(
2171 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2172 llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
2173 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2174 V = emitBoolVecConversion(V, ValNumElems, "extractvec");
2175
2176 return EmitFromMemory(V, Ty);
2177 }
2178
2179 // Handles vectors of sizes that are likely to be expanded to a larger size
2180 // to optimize performance.
2181 auto *VTy = cast<llvm::FixedVectorType>(Addr.getElementType());
2182 auto *NewVecTy =
2183 CGM.getABIInfo().getOptimalVectorMemoryType(VTy, getLangOpts());
2184
2185 if (VTy != NewVecTy) {
2186 Address Cast = Addr.withElementType(NewVecTy);
2187 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVecN");
2188 unsigned OldNumElements = VTy->getNumElements();
2189 SmallVector<int, 16> Mask(OldNumElements);
2190 std::iota(Mask.begin(), Mask.end(), 0);
2191 V = Builder.CreateShuffleVector(V, Mask, "extractVec");
2192 return EmitFromMemory(V, Ty);
2193 }
2194 }
2195
2196 // Atomic operations have to be done on integral types.
2197 LValue AtomicLValue =
2198 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2199 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
2200 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
2201 }
2202
2203 Addr =
2204 Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));
2205
2206 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
2207 if (isNontemporal) {
2208 llvm::MDNode *Node = llvm::MDNode::get(
2209 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2210 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2211 }
2212
2213 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2214
2215 maybeAttachRangeForLoad(Load, Ty, Loc);
2216
2217 return EmitFromMemory(Load, Ty);
2218}
2219
2220/// Converts a scalar value from its primary IR type (as returned
2221/// by ConvertType) to its load/store type (as returned by
2222/// convertTypeForLoadStore).
2223llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2224 if (auto *AtomicTy = Ty->getAs<AtomicType>())
2225 Ty = AtomicTy->getValueType();
2226
2227 if (Ty->isExtVectorBoolType() || Ty->isConstantMatrixBoolType()) {
2228 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2229
2230 if (Value->getType() == StoreTy)
2231 return Value;
2232
2233 if (StoreTy->isVectorTy() && StoreTy->getScalarSizeInBits() >
2234 Value->getType()->getScalarSizeInBits())
2235 return Builder.CreateZExt(Value, StoreTy);
2236
2237 // Expand to the memory bit width.
2238 unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2239 // <N x i1> --> <P x i1>.
2240 Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2241 // <P x i1> --> iP.
2242 Value = Builder.CreateBitCast(Value, StoreTy);
2243 }
2244
2245 if (Ty->hasBooleanRepresentation() || Ty->isBitIntType()) {
2246 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2248 return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2249 }
2250
2251 return Value;
2252}
2253
2254/// Converts a scalar value from its load/store type (as returned
2255/// by convertTypeForLoadStore) to its primary IR type (as returned
2256/// by ConvertType).
2257llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2258 if (auto *AtomicTy = Ty->getAs<AtomicType>())
2259 Ty = AtomicTy->getValueType();
2260
2262 const auto *RawIntTy = Value->getType();
2263
2264 // Bitcast iP --> <P x i1>.
2265 auto *PaddedVecTy = llvm::FixedVectorType::get(
2266 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2267 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2268 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2269 llvm::Type *ValTy = ConvertType(Ty);
2270 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2271 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2272 }
2273
2274 llvm::Type *ResTy = ConvertType(Ty);
2275 if (Ty->hasBooleanRepresentation() || Ty->isBitIntType() ||
2276 Ty->isExtVectorBoolType())
2277 return Builder.CreateTrunc(Value, ResTy, "loadedv");
2278
2279 return Value;
2280}
2281
2282// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2283// MatrixType), if it points to a array (the memory type of MatrixType).
2285 CodeGenFunction &CGF,
2286 bool IsVector = true) {
2287 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2288 if (ArrayTy && IsVector) {
2289 auto ArrayElements = ArrayTy->getNumElements();
2290 auto *ArrayElementTy = ArrayTy->getElementType();
2291 if (CGF.getContext().getLangOpts().HLSL) {
2292 auto *VectorTy = cast<llvm::FixedVectorType>(ArrayElementTy);
2293 ArrayElementTy = VectorTy->getElementType();
2294 ArrayElements *= VectorTy->getNumElements();
2295 }
2296 auto *VectorTy = llvm::FixedVectorType::get(ArrayElementTy, ArrayElements);
2297
2298 return Addr.withElementType(VectorTy);
2299 }
2300 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2301 if (VectorTy && !IsVector) {
2302 auto *ArrayTy = llvm::ArrayType::get(
2303 VectorTy->getElementType(),
2304 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2305
2306 return Addr.withElementType(ArrayTy);
2307 }
2308
2309 return Addr;
2310}
2311
2313 LValue Base;
2314 if (E->getBase()->isGLValue())
2315 Base = EmitLValue(E->getBase());
2316 else {
2317 assert(E->getBase()->getType()->isConstantMatrixType() &&
2318 "Result must be a Constant Matrix");
2319 llvm::Value *Mat = EmitScalarExpr(E->getBase());
2320 Address MatMem = CreateMemTemp(E->getBase()->getType());
2321 QualType Ty = E->getBase()->getType();
2322 llvm::Type *LTy = convertTypeForLoadStore(Ty, Mat->getType());
2323 if (LTy->getScalarSizeInBits() > Mat->getType()->getScalarSizeInBits())
2324 Mat = Builder.CreateZExt(Mat, LTy);
2325 Builder.CreateStore(Mat, MatMem);
2327 }
2328 QualType ResultType =
2329 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
2330
2331 // Encode the element access list into a vector of unsigned indices.
2332 // getEncodedElementAccess returns row-major linearized indices.
2334 E->getEncodedElementAccess(Indices);
2335
2336 // getEncodedElementAccess returns row-major linearized indices
2337 // If the matrix memory layout is column-major, convert indices
2338 // to column-major indices.
2339 bool IsColMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
2341 if (IsColMajor) {
2342 const auto *MT = E->getBase()->getType()->castAs<ConstantMatrixType>();
2343 unsigned NumCols = MT->getNumColumns();
2344 for (uint32_t &Idx : Indices) {
2345 // Decompose row-major index: Row = Idx / NumCols, Col = Idx % NumCols
2346 unsigned Row = Idx / NumCols;
2347 unsigned Col = Idx % NumCols;
2348 // Re-linearize as column-major
2349 Idx = MT->getColumnMajorFlattenedIndex(Row, Col);
2350 }
2351 }
2352
2353 if (Base.isSimple()) {
2354 RawAddress MatAddr = Base.getAddress();
2355 if (getLangOpts().HLSL &&
2357 MatAddr = CGM.getHLSLRuntime().createBufferMatrixTempAddress(
2358 Base, E->getExprLoc(), *this);
2359
2360 llvm::Constant *CV =
2361 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
2363 CV, ResultType, Base.getBaseInfo(),
2364 TBAAAccessInfo());
2365 }
2366 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
2367
2368 llvm::Constant *BaseElts = Base.getExtVectorElts();
2370
2371 for (unsigned Index : Indices)
2372 CElts.push_back(BaseElts->getAggregateElement(Index));
2373 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
2374
2376 MaybeConvertMatrixAddress(Base.getExtVectorAddress(), *this), CV,
2377 ResultType, Base.getBaseInfo(), TBAAAccessInfo());
2378}
2379
2380// Emit a store of a matrix LValue. This may require casting the original
2381// pointer to memory address (ArrayType) to a pointer to the value type
2382// (VectorType).
2383static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2384 bool isInit, CodeGenFunction &CGF) {
2385 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2386 value->getType()->isVectorTy());
2387 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2388 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2389 lvalue.isNontemporal());
2390}
2391
2393 bool Volatile, QualType Ty,
2394 LValueBaseInfo BaseInfo,
2395 TBAAAccessInfo TBAAInfo,
2396 bool isInit, bool isNontemporal) {
2397 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2398 if (GV->isThreadLocal())
2399 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2401
2402 // Handles vectors of sizes that are likely to be expanded to a larger size
2403 // to optimize performance.
2404 llvm::Type *SrcTy = Value->getType();
2405 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2406 if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2407 auto *NewVecTy =
2408 CGM.getABIInfo().getOptimalVectorMemoryType(VecTy, getLangOpts());
2409 if (!ClangVecTy->isPackedVectorBoolType(getContext()) &&
2410 VecTy != NewVecTy) {
2411 SmallVector<int, 16> Mask(NewVecTy->getNumElements(),
2412 VecTy->getNumElements());
2413 std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0);
2414 // Use undef instead of poison for the padding lanes, to make sure no
2415 // padding bits are poisoned, which may break coercion.
2416 Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
2417 Mask, "extractVec");
2418 SrcTy = NewVecTy;
2419 }
2420 if (Addr.getElementType() != SrcTy)
2421 Addr = Addr.withElementType(SrcTy);
2422 }
2423 }
2424
2425 Value = EmitToMemory(Value, Ty);
2426
2427 LValue AtomicLValue =
2428 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2429 if (Ty->isAtomicType() ||
2430 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2431 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2432 return;
2433 }
2434
2435 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2437
2438 if (isNontemporal) {
2439 llvm::MDNode *Node =
2440 llvm::MDNode::get(Store->getContext(),
2441 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2442 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2443 }
2444
2445 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2446}
2447
2448void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2449 bool isInit) {
2450 if (lvalue.getType()->isConstantMatrixType()) {
2451 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2452 return;
2453 }
2454
2455 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2456 lvalue.getType(), lvalue.getBaseInfo(),
2457 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2458}
2459
2460// Emit a load of a LValue of matrix type. This may require casting the pointer
2461// to memory address (ArrayType) to a pointer to the value type (VectorType).
2463 CodeGenFunction &CGF) {
2464 assert(LV.getType()->isConstantMatrixType());
2465 RawAddress DestAddr = LV.getAddress();
2466
2467 // HLSL constant buffers may pad matrix layouts, so copy elements into a
2468 // non-padded local alloca before loading.
2469 if (CGF.getLangOpts().HLSL &&
2470 LV.getType().getAddressSpace() == LangAS::hlsl_constant)
2471 DestAddr =
2473
2474 Address Addr = MaybeConvertMatrixAddress(DestAddr, CGF);
2475 LV.setAddress(Addr);
2476 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2477}
2478
2480 SourceLocation Loc) {
2481 QualType Ty = LV.getType();
2482 switch (getEvaluationKind(Ty)) {
2483 case TEK_Scalar:
2484 return EmitLoadOfLValue(LV, Loc);
2485 case TEK_Complex:
2486 return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
2487 case TEK_Aggregate:
2488 EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2489 return Slot.asRValue();
2490 }
2491 llvm_unreachable("bad evaluation kind");
2492}
2493
2494/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2495/// method emits the address of the lvalue, then loads the result as an rvalue,
2496/// returning the rvalue.
2498 // Load from __ptrauth.
2499 if (PointerAuthQualifier PtrAuth = LV.getQuals().getPointerAuth()) {
2501 llvm::Value *Value = EmitLoadOfLValue(LV, Loc).getScalarVal();
2502 return RValue::get(EmitPointerAuthUnqualify(PtrAuth, Value, LV.getType(),
2503 LV.getAddress(),
2504 /*known nonnull*/ false));
2505 }
2506
2507 if (LV.isObjCWeak()) {
2508 // load of a __weak object.
2509 Address AddrWeakObj = LV.getAddress();
2510 return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
2511 AddrWeakObj));
2512 }
2514 // In MRC mode, we do a load+autorelease.
2515 if (!getLangOpts().ObjCAutoRefCount) {
2517 }
2518
2519 // In ARC mode, we load retained and then consume the value.
2520 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2521 Object = EmitObjCConsumeObject(LV.getType(), Object);
2522 return RValue::get(Object);
2523 }
2524
2525 if (LV.isSimple()) {
2526 assert(!LV.getType()->isFunctionType());
2527
2528 if (LV.getType()->isConstantMatrixType())
2529 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2530
2531 // Everything needs a load.
2532 return RValue::get(EmitLoadOfScalar(LV, Loc));
2533 }
2534
2535 if (LV.isVectorElt()) {
2536 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2537 LV.isVolatileQualified());
2538 llvm::Value *Elt =
2539 Builder.CreateExtractElement(Load, LV.getVectorIdx(), "vecext");
2540 return RValue::get(EmitFromMemory(Elt, LV.getType()));
2541 }
2542
2543 // If this is a reference to a subset of the elements of a vector, either
2544 // shuffle the input or extract/insert them as appropriate.
2545 if (LV.isExtVectorElt()) {
2547 }
2548
2549 // Global Register variables always invoke intrinsics
2550 if (LV.isGlobalReg())
2551 return EmitLoadOfGlobalRegLValue(LV);
2552
2553 if (LV.isMatrixElt()) {
2554 llvm::Value *Idx = LV.getMatrixIdx();
2555 QualType EltTy = LV.getType();
2556 if (const auto *MatTy = EltTy->getAs<ConstantMatrixType>()) {
2557 EltTy = MatTy->getElementType();
2558 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2559 llvm::MatrixBuilder MB(Builder);
2560 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2561 }
2562 }
2563 llvm::LoadInst *Load =
2564 Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
2565 llvm::Value *Elt = Builder.CreateExtractElement(Load, Idx, "matrixext");
2566 return RValue::get(EmitFromMemory(Elt, EltTy));
2567 }
2568 if (LV.isMatrixRow()) {
2569 QualType MatTy = LV.getType();
2570 const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();
2571
2572 unsigned NumRows = MT->getNumRows();
2573 unsigned NumCols = MT->getNumColumns();
2574 unsigned NumLanes = NumCols;
2575 llvm::Value *MatrixVec = EmitLoadOfScalar(LV, Loc);
2576 llvm::Value *Row = LV.getMatrixRowIdx();
2577 llvm::Type *ElemTy = ConvertType(MT->getElementType());
2578 llvm::Constant *ColConstsIndices = nullptr;
2579 llvm::MatrixBuilder MB(Builder);
2580
2581 if (LV.isMatrixRowSwizzle()) {
2582 ColConstsIndices = LV.getMatrixRowElts();
2583 NumLanes = llvm::cast<llvm::FixedVectorType>(ColConstsIndices->getType())
2584 ->getNumElements();
2585 }
2586
2587 llvm::Type *RowTy = llvm::FixedVectorType::get(ElemTy, NumLanes);
2588 llvm::Value *Result = llvm::PoisonValue::get(RowTy); // <NumLanes x T>
2589
2590 for (unsigned Col = 0; Col < NumLanes; ++Col) {
2591 llvm::Value *ColIdx;
2592 if (ColConstsIndices)
2593 ColIdx = ColConstsIndices->getAggregateElement(Col);
2594 else
2595 ColIdx = llvm::ConstantInt::get(Row->getType(), Col);
2596 bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
2598 llvm::Value *EltIndex =
2599 MB.CreateIndex(Row, ColIdx, NumRows, NumCols, IsMatrixRowMajor);
2600 llvm::Value *Elt = Builder.CreateExtractElement(MatrixVec, EltIndex);
2601 llvm::Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
2602 Result = Builder.CreateInsertElement(Result, Elt, Lane);
2603 }
2604
2605 return RValue::get(Result);
2606 }
2607
2608 assert(LV.isBitField() && "Unknown LValue type!");
2609 return EmitLoadOfBitfieldLValue(LV, Loc);
2610}
2611
2613 SourceLocation Loc) {
2614 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2615
2616 // Get the output type.
2617 llvm::Type *ResLTy = ConvertType(LV.getType());
2618
2619 Address Ptr = LV.getBitFieldAddress();
2620 llvm::Value *Val =
2621 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2622
2623 bool UseVolatile = LV.isVolatileQualified() &&
2624 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2625 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2626 const unsigned StorageSize =
2627 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2628 if (Info.IsSigned) {
2629 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2630 unsigned HighBits = StorageSize - Offset - Info.Size;
2631 if (HighBits)
2632 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2633 if (Offset + HighBits)
2634 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2635 } else {
2636 if (Offset)
2637 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2638 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2639 Val = Builder.CreateAnd(
2640 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2641 }
2642 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2643 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2644 return RValue::get(Val);
2645}
2646
2647// If this is a reference to a subset of the elements of a vector, create an
2648// appropriate shufflevector.
2650 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2651 LV.isVolatileQualified());
2652
2653 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2654 // IR value to a vector here allows the rest of codegen to behave as normal.
2655 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2656 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2657 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2658 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2659 }
2660
2661 const llvm::Constant *Elts = LV.getExtVectorElts();
2662
2663 // If the result of the expression is a non-vector type, we must be extracting
2664 // a single element. Just codegen as an extractelement.
2665 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2666 if (!ExprVT) {
2667 unsigned InIdx = getAccessedFieldNo(0, Elts);
2668 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2669
2670 llvm::Value *Element = Builder.CreateExtractElement(Vec, Elt);
2671
2672 llvm::Type *LVTy = ConvertType(LV.getType());
2673 if (Element->getType()->getPrimitiveSizeInBits() >
2674 LVTy->getPrimitiveSizeInBits())
2675 Element = Builder.CreateTrunc(Element, LVTy);
2676
2677 return RValue::get(Element);
2678 }
2679
2680 // Always use shuffle vector to try to retain the original program structure
2681 unsigned NumResultElts = ExprVT->getNumElements();
2682
2684 for (unsigned i = 0; i != NumResultElts; ++i)
2685 Mask.push_back(getAccessedFieldNo(i, Elts));
2686
2687 Vec = Builder.CreateShuffleVector(Vec, Mask);
2688
2689 if (LV.getType()->isExtVectorBoolType())
2690 Vec = Builder.CreateTrunc(Vec, ConvertType(LV.getType()), "truncv");
2691
2692 return RValue::get(Vec);
2693}
2694
2695/// Generates lvalue for partial ext_vector access.
2697 Address VectorAddress = LV.getExtVectorAddress();
2698 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2699 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2700
2701 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2702
2703 const llvm::Constant *Elts = LV.getExtVectorElts();
2704 unsigned ix = getAccessedFieldNo(0, Elts);
2705
2706 Address VectorBasePtrPlusIx =
2707 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2708 "vector.elt");
2709
2710 return VectorBasePtrPlusIx;
2711}
2712
2713/// Load of global named registers are always calls to intrinsics.
2715 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2716 "Bad type for register variable");
2717 llvm::MDNode *RegName = cast<llvm::MDNode>(
2718 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2719
2720 // We accept integer and pointer types only
2721 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2722 llvm::Type *Ty = OrigTy;
2723 if (OrigTy->isPointerTy())
2724 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2725 llvm::Type *Types[] = { Ty };
2726
2727 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2728 llvm::Value *Call = Builder.CreateCall(
2729 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2730 if (OrigTy->isPointerTy())
2731 Call = Builder.CreateIntToPtr(Call, OrigTy);
2732 return RValue::get(Call);
2733}
2734
2735/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2736/// lvalue, where both are guaranteed to the have the same type, and that type
2737/// is 'Ty'.
2739 bool isInit) {
2740 if (!Dst.isSimple()) {
2741 if (Dst.isVectorElt()) {
2742 if (getLangOpts().HLSL) {
2743 // HLSL allows direct access to vector elements, so storing to
2744 // individual elements of a vector through VectorElt is handled as
2745 // separate store instructions.
2746 Address DstAddr = Dst.getVectorAddress();
2747 llvm::Type *DestAddrTy = DstAddr.getElementType();
2748 llvm::Type *ElemTy = DestAddrTy->getScalarType();
2750 CGM.getDataLayout().getPrefTypeAlign(ElemTy));
2751
2752 assert(ElemTy->getScalarSizeInBits() >= 8 &&
2753 "vector element type must be at least byte-sized");
2754
2755 llvm::Value *Val = Src.getScalarVal();
2756 if (Val->getType()->getPrimitiveSizeInBits() <
2757 ElemTy->getScalarSizeInBits())
2758 Val = Builder.CreateZExt(Val, ElemTy->getScalarType());
2759
2760 llvm::Value *Idx = Dst.getVectorIdx();
2761 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2762 Address DstElemAddr =
2763 Builder.CreateGEP(DstAddr, {Zero, Idx}, DestAddrTy, ElemAlign);
2764 Builder.CreateStore(Val, DstElemAddr, Dst.isVolatileQualified());
2765 return;
2766 }
2767
2768 // Read/modify/write the vector, inserting the new element.
2769 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2770 Dst.isVolatileQualified());
2771 llvm::Type *VecTy = Vec->getType();
2772 llvm::Value *SrcVal = Src.getScalarVal();
2773
2774 if (SrcVal->getType()->getPrimitiveSizeInBits() <
2775 VecTy->getScalarSizeInBits())
2776 SrcVal = Builder.CreateZExt(SrcVal, VecTy->getScalarType());
2777
2778 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2779 if (IRStoreTy) {
2780 auto *IRVecTy = llvm::FixedVectorType::get(
2781 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2782 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2783 // iN --> <N x i1>.
2784 }
2785
2786 // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar
2787 // types which are mapped to vector LLVM IR types (e.g. for implementing
2788 // an ABI).
2789 if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(SrcVal->getType());
2790 EltTy && EltTy->getNumElements() == 1)
2791 SrcVal = Builder.CreateBitCast(SrcVal, EltTy->getElementType());
2792
2793 Vec = Builder.CreateInsertElement(Vec, SrcVal, Dst.getVectorIdx(),
2794 "vecins");
2795 if (IRStoreTy) {
2796 // <N x i1> --> <iN>.
2797 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2798 }
2799
2800 auto *I = Builder.CreateStore(Vec, Dst.getVectorAddress(),
2801 Dst.isVolatileQualified());
2803 return;
2804 }
2805
2806 // If this is an update of extended vector elements, insert them as
2807 // appropriate.
2808 if (Dst.isExtVectorElt())
2810
2811 if (Dst.isGlobalReg())
2812 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2813
2814 if (Dst.isMatrixElt()) {
2815 if (getLangOpts().HLSL) {
2816 // HLSL allows direct access to matrix elements, so storing to
2817 // individual elements of a matrix through MatrixElt is handled as
2818 // separate store instructions.
2819 Address DstAddr = Dst.getMatrixAddress();
2820 llvm::Type *DestAddrTy = DstAddr.getElementType();
2821 llvm::Type *ElemTy = DestAddrTy->getScalarType();
2823 CGM.getDataLayout().getPrefTypeAlign(ElemTy));
2824
2825 assert(ElemTy->getScalarSizeInBits() >= 8 &&
2826 "matrix element type must be at least byte-sized");
2827
2828 llvm::Value *Val = Src.getScalarVal();
2829 if (Val->getType()->getPrimitiveSizeInBits() <
2830 ElemTy->getScalarSizeInBits())
2831 Val = Builder.CreateZExt(Val, ElemTy->getScalarType());
2832
2833 llvm::Value *Idx = Dst.getMatrixIdx();
2834 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2835 Address DstElemAddr =
2836 Builder.CreateGEP(DstAddr, {Zero, Idx}, DestAddrTy, ElemAlign);
2837 Builder.CreateStore(Val, DstElemAddr, Dst.isVolatileQualified());
2838 return;
2839 }
2840
2841 llvm::Value *Idx = Dst.getMatrixIdx();
2842 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2843 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2844 llvm::MatrixBuilder MB(Builder);
2845 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2846 }
2847 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2848 llvm::Value *InsertVal = Src.getScalarVal();
2849 llvm::Value *Vec =
2850 Builder.CreateInsertElement(Load, InsertVal, Idx, "matins");
2851 auto *I = Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2852 Dst.isVolatileQualified());
2854 return;
2855 }
2856 if (Dst.isMatrixRow()) {
2857 // NOTE: Since there are no other languages that implement matrix single
2858 // subscripting, the logic here is specific to HLSL which allows
2859 // per-element stores to rows of matrices.
2860 assert(getLangOpts().HLSL &&
2861 "Store through matrix row LValues is only implemented for HLSL!");
2862 QualType MatTy = Dst.getType();
2863 const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();
2864
2865 unsigned NumRows = MT->getNumRows();
2866 unsigned NumCols = MT->getNumColumns();
2867 unsigned NumLanes = NumCols;
2868
2869 Address DstAddr = Dst.getMatrixAddress();
2870 llvm::Type *DestAddrTy = DstAddr.getElementType();
2871 llvm::Type *ElemTy = DestAddrTy->getScalarType();
2872 CharUnits ElemAlign =
2873 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(ElemTy));
2874
2875 assert(ElemTy->getScalarSizeInBits() >= 8 &&
2876 "matrix element type must be at least byte-sized");
2877
2878 llvm::Value *RowVal = Src.getScalarVal();
2879 if (RowVal->getType()->getScalarType()->getPrimitiveSizeInBits() <
2880 ElemTy->getScalarSizeInBits()) {
2881 auto *RowValVecTy = cast<llvm::FixedVectorType>(RowVal->getType());
2882 llvm::Type *StorageElmTy = llvm::FixedVectorType::get(
2883 ElemTy->getScalarType(), RowValVecTy->getNumElements());
2884 RowVal = Builder.CreateZExt(RowVal, StorageElmTy);
2885 }
2886
2887 llvm::MatrixBuilder MB(Builder);
2888
2889 llvm::Constant *ColConstsIndices = nullptr;
2890 if (Dst.isMatrixRowSwizzle()) {
2891 ColConstsIndices = Dst.getMatrixRowElts();
2892 NumLanes =
2893 llvm::cast<llvm::FixedVectorType>(ColConstsIndices->getType())
2894 ->getNumElements();
2895 }
2896
2897 llvm::Value *Row = Dst.getMatrixRowIdx();
2898 for (unsigned Col = 0; Col < NumLanes; ++Col) {
2899 llvm::Value *ColIdx;
2900 if (ColConstsIndices)
2901 ColIdx = ColConstsIndices->getAggregateElement(Col);
2902 else
2903 ColIdx = llvm::ConstantInt::get(Row->getType(), Col);
2904 bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
2906 llvm::Value *EltIndex =
2907 MB.CreateIndex(Row, ColIdx, NumRows, NumCols, IsMatrixRowMajor);
2908 llvm::Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
2909 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2910 llvm::Value *NewElt = Builder.CreateExtractElement(RowVal, Lane);
2911 Address DstElemAddr =
2912 Builder.CreateGEP(DstAddr, {Zero, EltIndex}, DestAddrTy, ElemAlign);
2913 Builder.CreateStore(NewElt, DstElemAddr, Dst.isVolatileQualified());
2914 }
2915
2916 return;
2917 }
2918
2919 assert(Dst.isBitField() && "Unknown LValue type");
2920 return EmitStoreThroughBitfieldLValue(Src, Dst);
2921 }
2922
2923 // Handle __ptrauth qualification by re-signing the value.
2924 if (PointerAuthQualifier PointerAuth = Dst.getQuals().getPointerAuth()) {
2925 Src = RValue::get(EmitPointerAuthQualify(PointerAuth, Src.getScalarVal(),
2926 Dst.getType(), Dst.getAddress(),
2927 /*known nonnull*/ false));
2928 }
2929
2930 // There's special magic for assigning into an ARC-qualified l-value.
2931 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2932 switch (Lifetime) {
2934 llvm_unreachable("present but none");
2935
2937 // nothing special
2938 break;
2939
2941 if (isInit) {
2942 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2943 break;
2944 }
2945 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2946 return;
2947
2949 if (isInit)
2950 // Initialize and then skip the primitive store.
2952 else
2954 /*ignore*/ true);
2955 return;
2956
2959 Src.getScalarVal()));
2960 // fall into the normal path
2961 break;
2962 }
2963 }
2964
2965 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2966 // load of a __weak object.
2967 Address LvalueDst = Dst.getAddress();
2968 llvm::Value *src = Src.getScalarVal();
2969 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2970 return;
2971 }
2972
2973 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2974 // load of a __strong object.
2975 Address LvalueDst = Dst.getAddress();
2976 llvm::Value *src = Src.getScalarVal();
2977 if (Dst.isObjCIvar()) {
2978 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2979 llvm::Type *ResultType = IntPtrTy;
2981 llvm::Value *RHS = dst.emitRawPointer(*this);
2982 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2983 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2984 ResultType, "sub.ptr.lhs.cast");
2985 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2986 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2987 } else if (Dst.isGlobalObjCRef()) {
2988 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2989 Dst.isThreadLocalRef());
2990 }
2991 else
2992 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2993 return;
2994 }
2995
2996 assert(Src.isScalar() && "Can't emit an agg store with this method");
2997 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2998}
2999
3001 llvm::Value **Result) {
3002 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
3003 llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
3004 Address Ptr = Dst.getBitFieldAddress();
3005
3006 // Get the source value, truncated to the width of the bit-field.
3007 llvm::Value *SrcVal = Src.getScalarVal();
3008
3009 // Cast the source to the storage type and shift it into place.
3010 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
3011 /*isSigned=*/false);
3012 llvm::Value *MaskedVal = SrcVal;
3013
3014 const bool UseVolatile =
3015 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
3016 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
3017 const unsigned StorageSize =
3018 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
3019 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
3020 // See if there are other bits in the bitfield's storage we'll need to load
3021 // and mask together with source before storing.
3022 if (StorageSize != Info.Size) {
3023 assert(StorageSize > Info.Size && "Invalid bitfield size.");
3024 llvm::Value *Val =
3025 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
3026
3027 // Mask the source value as needed.
3028 if (!Dst.getType()->hasBooleanRepresentation())
3029 SrcVal = Builder.CreateAnd(
3030 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
3031 "bf.value");
3032 MaskedVal = SrcVal;
3033 if (Offset)
3034 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
3035
3036 // Mask out the original value.
3037 Val = Builder.CreateAnd(
3038 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
3039 "bf.clear");
3040
3041 // Or together the unchanged values and the source value.
3042 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
3043 } else {
3044 assert(Offset == 0);
3045 // According to the AACPS:
3046 // When a volatile bit-field is written, and its container does not overlap
3047 // with any non-bit-field member, its container must be read exactly once
3048 // and written exactly once using the access width appropriate to the type
3049 // of the container. The two accesses are not atomic.
3050 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
3051 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
3052 Builder.CreateLoad(Ptr, true, "bf.load");
3053 }
3054
3055 // Write the new value back out.
3056 auto *I = Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
3057 addInstToCurrentSourceAtom(I, SrcVal);
3058
3059 // Return the new value of the bit-field, if requested.
3060 if (Result) {
3061 llvm::Value *ResultVal = MaskedVal;
3062
3063 // Sign extend the value if needed.
3064 if (Info.IsSigned) {
3065 assert(Info.Size <= StorageSize);
3066 unsigned HighBits = StorageSize - Info.Size;
3067 if (HighBits) {
3068 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
3069 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
3070 }
3071 }
3072
3073 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
3074 "bf.result.cast");
3075 *Result = EmitFromMemory(ResultVal, Dst.getType());
3076 }
3077}
3078
3080 LValue Dst) {
3081 llvm::Value *SrcVal = Src.getScalarVal();
3082 Address DstAddr = Dst.getExtVectorAddress();
3083 const llvm::Constant *Elts = Dst.getExtVectorElts();
3084 if (DstAddr.getElementType()->getScalarSizeInBits() >
3085 SrcVal->getType()->getScalarSizeInBits())
3086 SrcVal = Builder.CreateZExt(
3087 SrcVal, convertTypeForLoadStore(Dst.getType(), SrcVal->getType()));
3088
3089 if (getLangOpts().HLSL) {
3090 llvm::Type *DestAddrTy = DstAddr.getElementType();
3091 // HLSL allows storing to scalar values through ExtVector component LValues.
3092 // To support this we need to handle the case where the destination address
3093 // is a scalar.
3094 if (!DestAddrTy->isVectorTy()) {
3095 assert(!Dst.getType()->isVectorType() &&
3096 "this should only occur for non-vector l-values");
3097 Builder.CreateStore(SrcVal, DstAddr, Dst.isVolatileQualified());
3098 return;
3099 }
3100
3101 // HLSL allows direct access to vector elements, so storing to individual
3102 // elements of a vector through ExtVector is handled as separate store
3103 // instructions.
3104 // If we are updating multiple elements, Dst and Src are vectors; for
3105 // a single element update they are scalars.
3106 const VectorType *VTy = Dst.getType()->getAs<VectorType>();
3107 unsigned NumSrcElts = VTy ? VTy->getNumElements() : 1;
3109 CGM.getDataLayout().getPrefTypeAlign(DestAddrTy->getScalarType()));
3110 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
3111
3112 for (unsigned I = 0; I != NumSrcElts; ++I) {
3113 llvm::Value *Val = VTy ? Builder.CreateExtractElement(
3114 SrcVal, llvm::ConstantInt::get(Int32Ty, I))
3115 : SrcVal;
3116 unsigned FieldNo = getAccessedFieldNo(I, Elts);
3117 Address DstElemAddr = Address::invalid();
3118 if (FieldNo == 0)
3119 DstElemAddr = DstAddr.withAlignment(ElemAlign);
3120 else
3121 DstElemAddr = Builder.CreateGEP(
3122 DstAddr, {Zero, llvm::ConstantInt::get(Int32Ty, FieldNo)},
3123 DestAddrTy, ElemAlign);
3124 Builder.CreateStore(Val, DstElemAddr, Dst.isVolatileQualified());
3125 }
3126 return;
3127 }
3128
3129 // This access turns into a read/modify/write of the vector. Load the input
3130 // value now.
3131 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
3132 llvm::Type *VecTy = Vec->getType();
3133
3134 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
3135 unsigned NumSrcElts = VTy->getNumElements();
3136 unsigned NumDstElts = cast<llvm::FixedVectorType>(VecTy)->getNumElements();
3137 if (NumDstElts == NumSrcElts) {
3138 // Use shuffle vector is the src and destination are the same number of
3139 // elements and restore the vector mask since it is on the side it will be
3140 // stored.
3141 SmallVector<int, 4> Mask(NumDstElts);
3142 for (unsigned i = 0; i != NumSrcElts; ++i)
3143 Mask[getAccessedFieldNo(i, Elts)] = i;
3144
3145 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
3146 } else if (NumDstElts > NumSrcElts) {
3147 // Extended the source vector to the same length and then shuffle it
3148 // into the destination.
3149 // FIXME: since we're shuffling with undef, can we just use the indices
3150 // into that? This could be simpler.
3151 SmallVector<int, 4> ExtMask;
3152 for (unsigned i = 0; i != NumSrcElts; ++i)
3153 ExtMask.push_back(i);
3154 ExtMask.resize(NumDstElts, -1);
3155 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
3156 // build identity
3158 for (unsigned i = 0; i != NumDstElts; ++i)
3159 Mask.push_back(i);
3160
3161 // When the vector size is odd and .odd or .hi is used, the last element
3162 // of the Elts constant array will be one past the size of the vector.
3163 // Ignore the last element here, if it is greater than the mask size.
3164 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
3165 NumSrcElts--;
3166
3167 // modify when what gets shuffled in
3168 for (unsigned i = 0; i != NumSrcElts; ++i)
3169 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
3170 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
3171 } else {
3172 // We should never shorten the vector
3173 llvm_unreachable("unexpected shorten vector length");
3174 }
3175 } else {
3176 // If the Src is a scalar (not a vector), and the target is a vector it must
3177 // be updating one element.
3178 unsigned InIdx = getAccessedFieldNo(0, Elts);
3179 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
3180
3181 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
3182 }
3183
3184 Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
3185 Dst.isVolatileQualified());
3186}
3187
3188/// Store of global named registers are always calls to intrinsics.
3190 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
3191 "Bad type for register variable");
3192 llvm::MDNode *RegName = cast<llvm::MDNode>(
3193 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
3194 assert(RegName && "Register LValue is not metadata");
3195
3196 // We accept integer and pointer types only
3197 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
3198 llvm::Type *Ty = OrigTy;
3199 if (OrigTy->isPointerTy())
3200 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
3201 llvm::Type *Types[] = { Ty };
3202
3203 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
3204 llvm::Value *Value = Src.getScalarVal();
3205 if (OrigTy->isPointerTy())
3206 Value = Builder.CreatePtrToInt(Value, Ty);
3207 Builder.CreateCall(
3208 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
3209}
3210
3211// setObjCGCLValueClass - sets class of the lvalue for the purpose of
3212// generating write-barries API. It is currently a global, ivar,
3213// or neither.
3214static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
3215 LValue &LV,
3216 bool IsMemberAccess=false) {
3217 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
3218 return;
3219
3220 if (isa<ObjCIvarRefExpr>(E)) {
3221 QualType ExpTy = E->getType();
3222 if (IsMemberAccess && ExpTy->isPointerType()) {
3223 // If ivar is a structure pointer, assigning to field of
3224 // this struct follows gcc's behavior and makes it a non-ivar
3225 // writer-barrier conservatively.
3226 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
3227 if (ExpTy->isRecordType()) {
3228 LV.setObjCIvar(false);
3229 return;
3230 }
3231 }
3232 LV.setObjCIvar(true);
3233 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
3234 LV.setBaseIvarExp(Exp->getBase());
3235 LV.setObjCArray(E->getType()->isArrayType());
3236 return;
3237 }
3238
3239 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
3240 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
3241 if (VD->hasGlobalStorage()) {
3242 LV.setGlobalObjCRef(true);
3243 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
3244 }
3245 }
3246 LV.setObjCArray(E->getType()->isArrayType());
3247 return;
3248 }
3249
3250 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
3251 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3252 return;
3253 }
3254
3255 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
3256 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3257 if (LV.isObjCIvar()) {
3258 // If cast is to a structure pointer, follow gcc's behavior and make it
3259 // a non-ivar write-barrier.
3260 QualType ExpTy = E->getType();
3261 if (ExpTy->isPointerType())
3262 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
3263 if (ExpTy->isRecordType())
3264 LV.setObjCIvar(false);
3265 }
3266 return;
3267 }
3268
3269 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
3270 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
3271 return;
3272 }
3273
3274 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
3275 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3276 return;
3277 }
3278
3279 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
3280 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3281 return;
3282 }
3283
3284 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
3285 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3286 return;
3287 }
3288
3289 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
3290 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
3291 if (LV.isObjCIvar() && !LV.isObjCArray())
3292 // Using array syntax to assigning to what an ivar points to is not
3293 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
3294 LV.setObjCIvar(false);
3295 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
3296 // Using array syntax to assigning to what global points to is not
3297 // same as assigning to the global itself. {id *G;} G[i] = 0;
3298 LV.setGlobalObjCRef(false);
3299 return;
3300 }
3301
3302 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
3303 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
3304 // We don't know if member is an 'ivar', but this flag is looked at
3305 // only in the context of LV.isObjCIvar().
3306 LV.setObjCArray(E->getType()->isArrayType());
3307 return;
3308 }
3309}
3310
3312 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
3313 llvm::Type *RealVarTy, SourceLocation Loc) {
3314 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
3316 CGF, VD, Addr, Loc);
3317 else
3318 Addr =
3319 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
3320
3321 Addr = Addr.withElementType(RealVarTy);
3323}
3324
3326 const VarDecl *VD, QualType T) {
3327 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3328 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
3329 // Return an invalid address if variable is MT_To (or MT_Enter starting with
3330 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
3331 // and MT_To (or MT_Enter) with unified memory, return a valid address.
3332 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3333 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3335 return Address::invalid();
3336 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
3337 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3338 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3340 "Expected link clause OR to clause with unified memory enabled.");
3341 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
3343 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
3344}
3345
3346Address
3348 LValueBaseInfo *PointeeBaseInfo,
3349 TBAAAccessInfo *PointeeTBAAInfo) {
3350 llvm::LoadInst *Load =
3351 Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
3352 CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
3353 QualType PTy = RefLVal.getType()->getPointeeType();
3354 CharUnits Align = CGM.getNaturalTypeAlignment(
3355 PTy, PointeeBaseInfo, PointeeTBAAInfo, /*ForPointeeType=*/true);
3356 if (!PTy->isIncompleteType()) {
3357 llvm::LLVMContext &Ctx = getLLVMContext();
3358 llvm::MDBuilder MDB(Ctx);
3359 // Emit !nonnull metadata
3360 if (CGM.getTypes().getTargetAddressSpace(PTy) == 0 &&
3361 !CGM.getCodeGenOpts().NullPointerIsValid)
3362 Load->setMetadata(llvm::LLVMContext::MD_nonnull,
3363 llvm::MDNode::get(Ctx, {}));
3364 // Emit !align metadata
3365 if (PTy->isObjectType()) {
3366 auto AlignVal = Align.getQuantity();
3367 if (AlignVal > 1) {
3368 Load->setMetadata(
3369 llvm::LLVMContext::MD_align,
3370 llvm::MDNode::get(Ctx, MDB.createConstant(llvm::ConstantInt::get(
3371 Builder.getInt64Ty(), AlignVal))));
3372 }
3373 }
3374 }
3375 return makeNaturalAddressForPointer(Load, PTy, Align,
3376 /*ForPointeeType=*/true, PointeeBaseInfo,
3377 PointeeTBAAInfo);
3378}
3379
3381 LValueBaseInfo PointeeBaseInfo;
3382 TBAAAccessInfo PointeeTBAAInfo;
3383 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
3384 &PointeeTBAAInfo);
3385 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
3386 PointeeBaseInfo, PointeeTBAAInfo);
3387}
3388
3390 const PointerType *PtrTy,
3391 LValueBaseInfo *BaseInfo,
3392 TBAAAccessInfo *TBAAInfo) {
3393 llvm::Value *Addr = Builder.CreateLoad(Ptr);
3394 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
3395 CharUnits(), /*ForPointeeType=*/true,
3396 BaseInfo, TBAAInfo);
3397}
3398
3400 const PointerType *PtrTy) {
3401 LValueBaseInfo BaseInfo;
3402 TBAAAccessInfo TBAAInfo;
3403 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
3404 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
3405}
3406
3408 const Expr *E, const VarDecl *VD) {
3409 QualType T = E->getType();
3410
3411 // If it's thread_local, emit a call to its wrapper function instead.
3412 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
3414 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
3415 // Check if the variable is marked as declare target with link clause in
3416 // device codegen.
3417 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
3419 if (Addr.isValid())
3421 }
3422
3423 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
3424
3425 if (VD->getTLSKind() != VarDecl::TLS_None)
3426 V = CGF.Builder.CreateThreadLocalAddress(V);
3427
3428 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
3429 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
3430 Address Addr(V, RealVarTy, Alignment);
3431 // Emit reference to the private copy of the variable if it is an OpenMP
3432 // threadprivate variable.
3433 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
3434 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3435 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
3436 E->getExprLoc());
3437 }
3438 LValue LV = VD->getType()->isReferenceType() ?
3442 setObjCGCLValueClass(CGF.getContext(), E, LV);
3443 return LV;
3444}
3445
3447 llvm::Type *Ty) {
3448 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3449 if (FD->hasAttr<WeakRefAttr>()) {
3451 return aliasee.getPointer();
3452 }
3453
3454 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
3455 return V;
3456}
3457
3458static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
3459 GlobalDecl GD) {
3460 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3461 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
3462 QualType ETy = E->getType();
3464 if (auto *GV = dyn_cast<llvm::GlobalValue>(V))
3465 V = llvm::NoCFIValue::get(GV);
3466 }
3467 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
3468 return CGF.MakeAddrLValue(V, ETy, Alignment, AlignmentSource::Decl);
3469}
3470
3472 llvm::Value *ThisValue) {
3473
3474 return CGF.EmitLValueForLambdaField(FD, ThisValue);
3475}
3476
3477/// Named Registers are named metadata pointing to the register name
3478/// which will be read from/written to as an argument to the intrinsic
3479/// @llvm.read/write_register.
3480/// So far, only the name is being passed down, but other options such as
3481/// register type, allocation type or even optimization options could be
3482/// passed down via the metadata node.
3483static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
3484 SmallString<64> Name("llvm.named.register.");
3485 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
3486 assert(Asm->getLabel().size() < 64-Name.size() &&
3487 "Register name too big");
3488 Name.append(Asm->getLabel());
3489 llvm::NamedMDNode *M =
3490 CGM.getModule().getOrInsertNamedMetadata(Name);
3491 if (M->getNumOperands() == 0) {
3492 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
3493 Asm->getLabel());
3494 llvm::Metadata *Ops[] = {Str};
3495 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
3496 }
3497
3498 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
3499
3500 llvm::Value *Ptr =
3501 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
3502 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
3503}
3504
3505/// Determine whether we can emit a reference to \p VD from the current
3506/// context, despite not necessarily having seen an odr-use of the variable in
3507/// this context.
3509 const DeclRefExpr *E,
3510 const VarDecl *VD) {
3511 // For a variable declared in an enclosing scope, do not emit a spurious
3512 // reference even if we have a capture, as that will emit an unwarranted
3513 // reference to our capture state, and will likely generate worse code than
3514 // emitting a local copy.
3516 return false;
3517
3518 // For a local declaration declared in this function, we can always reference
3519 // it even if we don't have an odr-use.
3520 if (VD->hasLocalStorage()) {
3521 return VD->getDeclContext() ==
3522 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
3523 }
3524
3525 // For a global declaration, we can emit a reference to it if we know
3526 // for sure that we are able to emit a definition of it.
3527 VD = VD->getDefinition(CGF.getContext());
3528 if (!VD)
3529 return false;
3530
3531 // Don't emit a spurious reference if it might be to a variable that only
3532 // exists on a different device / target.
3533 // FIXME: This is unnecessarily broad. Check whether this would actually be a
3534 // cross-target reference.
3535 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
3536 CGF.getLangOpts().OpenCL) {
3537 return false;
3538 }
3539
3540 // We can emit a spurious reference only if the linkage implies that we'll
3541 // be emitting a non-interposable symbol that will be retained until link
3542 // time.
3543 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3544 case llvm::GlobalValue::ExternalLinkage:
3545 case llvm::GlobalValue::LinkOnceODRLinkage:
3546 case llvm::GlobalValue::WeakODRLinkage:
3547 case llvm::GlobalValue::InternalLinkage:
3548 case llvm::GlobalValue::PrivateLinkage:
3549 return true;
3550 default:
3551 return false;
3552 }
3553}
3554
3556 const NamedDecl *ND = E->getDecl();
3557 QualType T = E->getType();
3558
3559 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3560 "should not emit an unevaluated operand");
3561
3562 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3563 // Global Named registers access via intrinsics only
3564 if (VD->getStorageClass() == SC_Register &&
3565 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3566 return EmitGlobalNamedRegister(VD, CGM);
3567
3568 // If this DeclRefExpr does not constitute an odr-use of the variable,
3569 // we're not permitted to emit a reference to it in general, and it might
3570 // not be captured if capture would be necessary for a use. Emit the
3571 // constant value directly instead.
3572 if (E->isNonOdrUse() == NOUR_Constant &&
3573 (VD->getType()->isReferenceType() ||
3574 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3575 VD->getAnyInitializer(VD);
3576 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3577 E->getLocation(), *VD->evaluateValue(), VD->getType());
3578 assert(Val && "failed to emit constant expression");
3579
3581 if (!VD->getType()->isReferenceType()) {
3582 // Spill the constant value to a global.
3583 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3584 getContext().getDeclAlign(VD));
3585 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3586 auto *PTy = llvm::PointerType::get(
3587 getLLVMContext(), getTypes().getTargetAddressSpace(VD->getType()));
3588 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3589 } else {
3590 // Should we be using the alignment of the constant pointer we emitted?
3591 CharUnits Alignment =
3592 CGM.getNaturalTypeAlignment(E->getType(),
3593 /* BaseInfo= */ nullptr,
3594 /* TBAAInfo= */ nullptr,
3595 /* forPointeeType= */ true);
3596 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3597 }
3599 }
3600
3601 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3602
3603 // Check for captured variables.
3605 VD = VD->getCanonicalDecl();
3606 if (auto *FD = LambdaCaptureFields.lookup(VD))
3607 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3608 if (CapturedStmtInfo) {
3609 auto I = LocalDeclMap.find(VD);
3610 if (I != LocalDeclMap.end()) {
3611 LValue CapLVal;
3612 if (VD->getType()->isReferenceType())
3613 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3615 else
3616 CapLVal = MakeAddrLValue(I->second, T);
3617 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3618 // in simd context.
3619 if (getLangOpts().OpenMP &&
3620 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3621 CapLVal.setNontemporal(/*Value=*/true);
3622 return CapLVal;
3623 }
3624 LValue CapLVal =
3625 EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
3626 CapturedStmtInfo->getContextValue());
3627 Address LValueAddress = CapLVal.getAddress();
3628 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3629 LValueAddress.getElementType(),
3630 getContext().getDeclAlign(VD)),
3631 CapLVal.getType(),
3633 CapLVal.getTBAAInfo());
3634 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3635 // in simd context.
3636 if (getLangOpts().OpenMP &&
3637 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3638 CapLVal.setNontemporal(/*Value=*/true);
3639 return CapLVal;
3640 }
3641
3642 assert(isa<BlockDecl>(CurCodeDecl));
3643 Address addr = GetAddrOfBlockDecl(VD);
3644 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3645 }
3646 }
3647
3648 // FIXME: We should be able to assert this for FunctionDecls as well!
3649 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3650 // those with a valid source location.
3651 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3652 !E->getLocation().isValid()) &&
3653 "Should not use decl without marking it used!");
3654
3655 if (ND->hasAttr<WeakRefAttr>()) {
3656 const auto *VD = cast<ValueDecl>(ND);
3657 ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
3658 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3659 }
3660
3661 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3662 // Check if this is a global variable.
3663 if (VD->hasLinkage() || VD->isStaticDataMember())
3664 return EmitGlobalVarDeclLValue(*this, E, VD);
3665
3666 Address addr = Address::invalid();
3667
3668 // The variable should generally be present in the local decl map.
3669 auto iter = LocalDeclMap.find(VD);
3670 if (iter != LocalDeclMap.end()) {
3671 addr = iter->second;
3672
3673 // Otherwise, it might be static local we haven't emitted yet for
3674 // some reason; most likely, because it's in an outer function.
3675 } else if (VD->isStaticLocal()) {
3676 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3677 *VD, CGM.getLLVMLinkageVarDefinition(VD));
3678 addr = Address(
3679 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3680
3681 // No other cases for now.
3682 } else {
3683 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3684 }
3685
3686 // Handle threadlocal function locals.
3687 if (VD->getTLSKind() != VarDecl::TLS_None)
3688 addr = addr.withPointer(
3689 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3691
3692 // Check for OpenMP threadprivate variables.
3693 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3694 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3696 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3697 E->getExprLoc());
3698 }
3699
3700 // Drill into block byref variables.
3701 bool isBlockByref = VD->isEscapingByref();
3702 if (isBlockByref) {
3703 addr = emitBlockByrefAddress(addr, VD);
3704 }
3705
3706 // Drill into reference types.
3707 LValue LV = VD->getType()->isReferenceType() ?
3710
3711 bool isLocalStorage = VD->hasLocalStorage();
3712
3713 bool NonGCable = isLocalStorage &&
3714 !VD->getType()->isReferenceType() &&
3715 !isBlockByref;
3716 if (NonGCable) {
3718 LV.setNonGC(true);
3719 }
3720
3721 bool isImpreciseLifetime =
3722 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3723 if (isImpreciseLifetime)
3726 return LV;
3727 }
3728
3729 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3730 return EmitFunctionDeclLValue(*this, E, FD);
3731
3732 // FIXME: While we're emitting a binding from an enclosing scope, all other
3733 // DeclRefExprs we see should be implicitly treated as if they also refer to
3734 // an enclosing scope.
3735 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3737 auto *FD = LambdaCaptureFields.lookup(BD);
3738 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3739 }
3740 // Suppress debug location updates when visiting the binding, since the
3741 // binding may emit instructions that would otherwise be associated with the
3742 // binding itself, rather than the expression referencing the binding. (this
3743 // leads to jumpy debug stepping behavior where the location/debugger jump
3744 // back to the binding declaration, then back to the expression referencing
3745 // the binding)
3747 return EmitLValue(BD->getBinding(), NotKnownNonNull);
3748 }
3749
3750 // We can form DeclRefExprs naming GUID declarations when reconstituting
3751 // non-type template parameters into expressions.
3752 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3753 return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
3755
3756 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3757 ConstantAddress ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3758 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3759
3760 if (AS != T.getAddressSpace()) {
3761 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3762 llvm::Type *PtrTy =
3763 llvm::PointerType::get(CGM.getLLVMContext(), TargetAS);
3764 llvm::Constant *ASC = CGM.performAddrSpaceCast(ATPO.getPointer(), PtrTy);
3765 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3766 }
3767
3768 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3769 }
3770
3771 llvm_unreachable("Unhandled DeclRefExpr");
3772}
3773
3775 // __extension__ doesn't affect lvalue-ness.
3776 if (E->getOpcode() == UO_Extension)
3777 return EmitLValue(E->getSubExpr());
3778
3780 switch (E->getOpcode()) {
3781 default: llvm_unreachable("Unknown unary operator lvalue!");
3782 case UO_Deref: {
3784 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3785
3786 LValueBaseInfo BaseInfo;
3787 TBAAAccessInfo TBAAInfo;
3789 &TBAAInfo);
3790 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3792
3793 // We should not generate __weak write barrier on indirect reference
3794 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3795 // But, we continue to generate __strong write barrier on indirect write
3796 // into a pointer to object.
3797 if (getLangOpts().ObjC &&
3798 getLangOpts().getGC() != LangOptions::NonGC &&
3799 LV.isObjCWeak())
3801 return LV;
3802 }
3803 case UO_Real:
3804 case UO_Imag: {
3805 LValue LV = EmitLValue(E->getSubExpr());
3806 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3807
3808 // __real is valid on scalars. This is a faster way of testing that.
3809 // __imag can only produce an rvalue on scalars.
3810 if (E->getOpcode() == UO_Real &&
3811 !LV.getAddress().getElementType()->isStructTy()) {
3812 assert(E->getSubExpr()->getType()->isArithmeticType());
3813 return LV;
3814 }
3815
3816 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3817
3818 Address Component =
3819 (E->getOpcode() == UO_Real
3822 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3823 CGM.getTBAAInfoForSubobject(LV, T));
3824 ElemLV.getQuals().addQualifiers(LV.getQuals());
3825 return ElemLV;
3826 }
3827 case UO_PreInc:
3828 case UO_PreDec: {
3829 LValue LV = EmitLValue(E->getSubExpr());
3830 bool isInc = E->getOpcode() == UO_PreInc;
3831
3832 if (E->getType()->isAnyComplexType())
3833 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3834 else
3835 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3836 return LV;
3837 }
3838 }
3839}
3840
3842 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
3844}
3845
3847 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
3849}
3850
3852 auto SL = E->getFunctionName();
3853 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3854 StringRef FnName = CurFn->getName();
3855 FnName.consume_front("\01");
3856 StringRef NameItems[] = {
3858 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3859 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3860 std::string Name = std::string(SL->getString());
3861 if (!Name.empty()) {
3862 unsigned Discriminator =
3863 CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3864 if (Discriminator)
3865 Name += "_" + Twine(Discriminator + 1).str();
3866 auto C = CGM.GetAddrOfConstantCString(Name, GVName);
3868 } else {
3869 auto C = CGM.GetAddrOfConstantCString(std::string(FnName), GVName);
3871 }
3872 }
3873 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3875}
3876
3877/// Emit a type description suitable for use by a runtime sanitizer library. The
3878/// format of a type descriptor is
3879///
3880/// \code
3881/// { i16 TypeKind, i16 TypeInfo }
3882/// \endcode
3883///
3884/// followed by an array of i8 containing the type name with extra information
3885/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3886/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3887/// anything else.
3889 // Only emit each type's descriptor once.
3890 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3891 return C;
3892
3893 uint16_t TypeKind = TK_Unknown;
3894 uint16_t TypeInfo = 0;
3895 bool IsBitInt = false;
3896
3897 if (T->isIntegerType()) {
3898 TypeKind = TK_Integer;
3899 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3900 (T->isSignedIntegerType() ? 1 : 0);
3901 // Follow suggestion from discussion of issue 64100.
3902 // So we can write the exact amount of bits in TypeName after '\0'
3903 // making it <diagnostic-like type name>.'\0'.<32-bit width>.
3904 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3905 // Do a sanity checks as we are using 32-bit type to store bit length.
3906 assert(getContext().getTypeSize(T) > 0 &&
3907 " non positive amount of bits in __BitInt type");
3908 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3909 " too many bits in __BitInt type");
3910
3911 // Redefine TypeKind with the actual __BitInt type if we have signed
3912 // BitInt.
3913 TypeKind = TK_BitInt;
3914 IsBitInt = true;
3915 }
3916 } else if (T->isFloatingType()) {
3917 TypeKind = TK_Float;
3919 }
3920
3921 // Format the type name as if for a diagnostic, including quotes and
3922 // optionally an 'aka'.
3923 SmallString<32> Buffer;
3924 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
3925 (intptr_t)T.getAsOpaquePtr(), StringRef(),
3926 StringRef(), {}, Buffer, {});
3927
3928 if (IsBitInt) {
3929 // The Structure is: 0 to end the string, 32 bit unsigned integer in target
3930 // endianness, zero.
3931 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3932 const auto *EIT = T->castAs<BitIntType>();
3933 uint32_t Bits = EIT->getNumBits();
3934 llvm::support::endian::write32(S + 1, Bits,
3935 getTarget().isBigEndian()
3936 ? llvm::endianness::big
3937 : llvm::endianness::little);
3938 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3939 Buffer.append(Str);
3940 }
3941
3942 llvm::Constant *Components[] = {
3943 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3944 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3945 };
3946 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3947
3948 auto *GV = new llvm::GlobalVariable(
3949 CGM.getModule(), Descriptor->getType(),
3950 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3951 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3952 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3953
3954 // Remember the descriptor for this type.
3955 CGM.setTypeDescriptorInMap(T, GV);
3956
3957 return GV;
3958}
3959
3960llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3961 llvm::Type *TargetTy = IntPtrTy;
3962
3963 if (V->getType() == TargetTy)
3964 return V;
3965
3966 // Floating-point types which fit into intptr_t are bitcast to integers
3967 // and then passed directly (after zero-extension, if necessary).
3968 if (V->getType()->isFloatingPointTy()) {
3969 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3970 if (Bits <= TargetTy->getIntegerBitWidth())
3971 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3972 Bits));
3973 }
3974
3975 // Integers which fit in intptr_t are zero-extended and passed directly.
3976 if (V->getType()->isIntegerTy() &&
3977 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3978 return Builder.CreateZExt(V, TargetTy);
3979
3980 // Pointers are passed directly, everything else is passed by address.
3981 if (!V->getType()->isPointerTy()) {
3982 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3983 Builder.CreateStore(V, Ptr);
3984 V = Ptr.getPointer();
3985 }
3986 return Builder.CreatePtrToInt(V, TargetTy);
3987}
3988
3989/// Emit a representation of a SourceLocation for passing to a handler
3990/// in a sanitizer runtime library. The format for this data is:
3991/// \code
3992/// struct SourceLocation {
3993/// const char *Filename;
3994/// int32_t Line, Column;
3995/// };
3996/// \endcode
3997/// For an invalid SourceLocation, the Filename pointer is null.
3999 llvm::Constant *Filename;
4000 int Line, Column;
4001
4003 if (PLoc.isValid()) {
4004 StringRef FilenameString = PLoc.getFilename();
4005
4006 int PathComponentsToStrip =
4007 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
4008 if (PathComponentsToStrip < 0) {
4009 assert(PathComponentsToStrip != INT_MIN);
4010 int PathComponentsToKeep = -PathComponentsToStrip;
4011 auto I = llvm::sys::path::rbegin(FilenameString);
4012 auto E = llvm::sys::path::rend(FilenameString);
4013 while (I != E && --PathComponentsToKeep)
4014 ++I;
4015
4016 FilenameString = FilenameString.substr(I - E);
4017 } else if (PathComponentsToStrip > 0) {
4018 auto I = llvm::sys::path::begin(FilenameString);
4019 auto E = llvm::sys::path::end(FilenameString);
4020 while (I != E && PathComponentsToStrip--)
4021 ++I;
4022
4023 if (I != E)
4024 FilenameString =
4025 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
4026 else
4027 FilenameString = llvm::sys::path::filename(FilenameString);
4028 }
4029
4030 auto FilenameGV =
4031 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
4032 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
4034 FilenameGV.getPointer()->stripPointerCasts()));
4035 Filename = FilenameGV.getPointer();
4036 Line = PLoc.getLine();
4037 Column = PLoc.getColumn();
4038 } else {
4039 Filename = llvm::Constant::getNullValue(Int8PtrTy);
4040 Line = Column = 0;
4041 }
4042
4043 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
4044 Builder.getInt32(Column)};
4045
4046 return llvm::ConstantStruct::getAnon(Data);
4047}
4048
4049namespace {
4050/// Specify under what conditions this check can be recovered
4051enum class CheckRecoverableKind {
4052 /// Always terminate program execution if this check fails.
4054 /// Check supports recovering, runtime has both fatal (noreturn) and
4055 /// non-fatal handlers for this check.
4056 Recoverable,
4057 /// Runtime conditionally aborts, always need to support recovery.
4059};
4060}
4061
4062static CheckRecoverableKind
4064 if (Ordinal == SanitizerKind::SO_Vptr)
4065 return CheckRecoverableKind::AlwaysRecoverable;
4066 else if (Ordinal == SanitizerKind::SO_Return ||
4067 Ordinal == SanitizerKind::SO_Unreachable)
4068 return CheckRecoverableKind::Unrecoverable;
4069 else
4070 return CheckRecoverableKind::Recoverable;
4071}
4072
4073namespace {
4074struct SanitizerHandlerInfo {
4075 char const *const Name;
4076 unsigned Version;
4077};
4078}
4079
4080const SanitizerHandlerInfo SanitizerHandlers[] = {
4081#define SANITIZER_CHECK(Enum, Name, Version, Msg) {#Name, Version},
4083#undef SANITIZER_CHECK
4084};
4085
4087 llvm::FunctionType *FnType,
4089 SanitizerHandler CheckHandler,
4090 CheckRecoverableKind RecoverKind, bool IsFatal,
4091 llvm::BasicBlock *ContBB, bool NoMerge) {
4092 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
4093 std::optional<ApplyDebugLocation> DL;
4094 if (!CGF.Builder.getCurrentDebugLocation()) {
4095 // Ensure that the call has at least an artificial debug location.
4096 DL.emplace(CGF, SourceLocation());
4097 }
4098 bool NeedsAbortSuffix =
4099 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
4100 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
4101 bool HandlerPreserveAllRegs =
4102 CGF.CGM.getCodeGenOpts().SanitizeHandlerPreserveAllRegs;
4103 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
4104 const StringRef CheckName = CheckInfo.Name;
4105 std::string FnName = "__ubsan_handle_" + CheckName.str();
4106 if (CheckInfo.Version && !MinimalRuntime)
4107 FnName += "_v" + llvm::utostr(CheckInfo.Version);
4108 if (MinimalRuntime)
4109 FnName += "_minimal";
4110 if (NeedsAbortSuffix)
4111 FnName += "_abort";
4112 if (HandlerPreserveAllRegs && !NeedsAbortSuffix)
4113 FnName += "_preserve";
4114 bool MayReturn =
4115 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
4116
4117 llvm::AttrBuilder B(CGF.getLLVMContext());
4118 if (!MayReturn) {
4119 B.addAttribute(llvm::Attribute::NoReturn)
4120 .addAttribute(llvm::Attribute::NoUnwind);
4121 }
4122 B.addUWTableAttr(llvm::UWTableKind::Default);
4123
4124 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
4125 FnType, FnName,
4126 llvm::AttributeList::get(CGF.getLLVMContext(),
4127 llvm::AttributeList::FunctionIndex, B),
4128 /*Local=*/true);
4129 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
4130 NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel ||
4131 (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
4132 if (NoMerge)
4133 HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
4134 if (HandlerPreserveAllRegs && !NeedsAbortSuffix) {
4135 // N.B. there is also a clang::CallingConv which is not what we want here.
4136 HandlerCall->setCallingConv(llvm::CallingConv::PreserveAll);
4137 }
4138 if (!MayReturn) {
4139 HandlerCall->setDoesNotReturn();
4140 CGF.Builder.CreateUnreachable();
4141 } else {
4142 CGF.Builder.CreateBr(ContBB);
4143 }
4144}
4145
4147 ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
4148 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
4149 ArrayRef<llvm::Value *> DynamicArgs, const TrapReason *TR) {
4150 assert(IsSanitizerScope);
4151 assert(Checked.size() > 0);
4152 assert(CheckHandler >= 0 &&
4153 size_t(CheckHandler) < std::size(SanitizerHandlers));
4154 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
4155
4156 llvm::Value *FatalCond = nullptr;
4157 llvm::Value *RecoverableCond = nullptr;
4158 llvm::Value *TrapCond = nullptr;
4159 bool NoMerge = false;
4160 // Expand checks into:
4161 // (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ...
4162 // We need separate allow_ubsan_check intrinsics because they have separately
4163 // specified cutoffs.
4164 // This expression looks expensive but will be simplified after
4165 // LowerAllowCheckPass.
4166 for (auto &[Check, Ord] : Checked) {
4167 llvm::Value *GuardedCheck = Check;
4169 (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] > 0)) {
4170 llvm::Value *Allow = Builder.CreateCall(
4171 CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
4172 llvm::ConstantInt::get(CGM.Int8Ty, Ord));
4173 GuardedCheck = Builder.CreateOr(Check, Builder.CreateNot(Allow));
4174 }
4175
4176 // -fsanitize-trap= overrides -fsanitize-recover=.
4177 llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
4178 : CGM.getCodeGenOpts().SanitizeRecover.has(Ord)
4179 ? RecoverableCond
4180 : FatalCond;
4181 Cond = Cond ? Builder.CreateAnd(Cond, GuardedCheck) : GuardedCheck;
4182
4183 if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Ord))
4184 NoMerge = true;
4185 }
4186
4187 if (TrapCond)
4188 EmitTrapCheck(TrapCond, CheckHandler, NoMerge, TR);
4189 if (!FatalCond && !RecoverableCond)
4190 return;
4191
4192 llvm::Value *JointCond;
4193 if (FatalCond && RecoverableCond)
4194 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
4195 else
4196 JointCond = FatalCond ? FatalCond : RecoverableCond;
4197 assert(JointCond);
4198
4199 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
4200 assert(SanOpts.has(Checked[0].second));
4201#ifndef NDEBUG
4202 for (int i = 1, n = Checked.size(); i < n; ++i) {
4203 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
4204 "All recoverable kinds in a single check must be same!");
4205 assert(SanOpts.has(Checked[i].second));
4206 }
4207#endif
4208
4209 llvm::BasicBlock *Cont = createBasicBlock("cont");
4210 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
4211 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
4212 // Give hint that we very much don't expect to execute the handler
4213 llvm::MDBuilder MDHelper(getLLVMContext());
4214 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
4215 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
4216 EmitBlock(Handlers);
4217
4218 // Clear arguments for the MinimalRuntime handler.
4219 if (CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
4220 StaticArgs = {};
4221 DynamicArgs = {};
4222 }
4223
4224 // Handler functions take an i8* pointing to the (handler-specific) static
4225 // information block, followed by a sequence of intptr_t arguments
4226 // representing operand values.
4229
4230 Args.reserve(DynamicArgs.size() + 1);
4231 ArgTypes.reserve(DynamicArgs.size() + 1);
4232
4233 // Emit handler arguments and create handler function type.
4234 if (!StaticArgs.empty()) {
4235 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
4236 auto *InfoPtr = new llvm::GlobalVariable(
4237 CGM.getModule(), Info->getType(),
4238 // Non-constant global is used in a handler to deduplicate reports.
4239 // TODO: change deduplication logic and make it constant.
4240 /*isConstant=*/false, llvm::GlobalVariable::PrivateLinkage, Info, "",
4241 nullptr, llvm::GlobalVariable::NotThreadLocal,
4242 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
4243 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4244 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
4245 Args.push_back(InfoPtr);
4246 ArgTypes.push_back(Args.back()->getType());
4247 }
4248
4249 for (llvm::Value *DynamicArg : DynamicArgs) {
4250 Args.push_back(EmitCheckValue(DynamicArg));
4251 ArgTypes.push_back(IntPtrTy);
4252 }
4253
4254 llvm::FunctionType *FnType =
4255 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
4256
4257 if (!FatalCond || !RecoverableCond) {
4258 // Simple case: we need to generate a single handler call, either
4259 // fatal, or non-fatal.
4260 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
4261 (FatalCond != nullptr), Cont, NoMerge);
4262 } else {
4263 // Emit two handler calls: first one for set of unrecoverable checks,
4264 // another one for recoverable.
4265 llvm::BasicBlock *NonFatalHandlerBB =
4266 createBasicBlock("non_fatal." + CheckName);
4267 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
4268 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
4269 EmitBlock(FatalHandlerBB);
4270 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
4271 NonFatalHandlerBB, NoMerge);
4272 EmitBlock(NonFatalHandlerBB);
4273 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
4274 Cont, NoMerge);
4275 }
4276
4277 EmitBlock(Cont);
4278}
4279
4281 SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
4282 llvm::ConstantInt *TypeId, llvm::Value *Ptr,
4283 ArrayRef<llvm::Constant *> StaticArgs) {
4284 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
4285
4286 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
4287 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
4288
4289 llvm::MDBuilder MDHelper(getLLVMContext());
4290 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
4291 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
4292
4293 EmitBlock(CheckBB);
4294
4295 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);
4296
4297 llvm::CallInst *CheckCall;
4298 llvm::FunctionCallee SlowPathFn;
4299 if (WithDiag) {
4300 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
4301 auto *InfoPtr =
4302 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
4303 llvm::GlobalVariable::PrivateLinkage, Info);
4304 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4305 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
4306
4307 SlowPathFn = CGM.getModule().getOrInsertFunction(
4308 "__cfi_slowpath_diag",
4309 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
4310 false));
4311 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
4312 } else {
4313 SlowPathFn = CGM.getModule().getOrInsertFunction(
4314 "__cfi_slowpath",
4315 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
4316 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
4317 }
4318
4319 CGM.setDSOLocal(
4320 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
4321 CheckCall->setDoesNotThrow();
4322
4323 EmitBlock(Cont);
4324}
4325
4326// Emit a stub for __cfi_check function so that the linker knows about this
4327// symbol in LTO mode.
4329 llvm::Module *M = &CGM.getModule();
4330 ASTContext &C = getContext();
4331 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
4332
4334 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
4335 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
4336 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
4338 FnArgs.push_back(&ArgCallsiteTypeId);
4339 FnArgs.push_back(&ArgAddr);
4340 FnArgs.push_back(&ArgCFICheckFailData);
4341 const CGFunctionInfo &FI =
4342 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
4343
4344 llvm::Function *F = llvm::Function::Create(
4345 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
4346 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
4347 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
4348 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
4349 F->setAlignment(llvm::Align(4096));
4350 CGM.setDSOLocal(F);
4351
4352 llvm::LLVMContext &Ctx = M->getContext();
4353 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
4354 // CrossDSOCFI pass is not executed if there is no executable code.
4355 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
4356 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
4357 llvm::ReturnInst::Create(Ctx, nullptr, BB);
4358}
4359
4360// This function is basically a switch over the CFI failure kind, which is
4361// extracted from CFICheckFailData (1st function argument). Each case is either
4362// llvm.trap or a call to one of the two runtime handlers, based on
4363// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
4364// failure kind) traps, but this should really never happen. CFICheckFailData
4365// can be nullptr if the calling module has -fsanitize-trap behavior for this
4366// check kind; in this case __cfi_check_fail traps as well.
4368 auto CheckHandler = SanitizerHandler::CFICheckFail;
4369 // TODO: the SanitizerKind is not yet determined for this check (and might
4370 // not even be available, if Data == nullptr). However, we still want to
4371 // annotate the instrumentation. We approximate this by using all the CFI
4372 // kinds.
4373 SanitizerDebugLocation SanScope(
4374 this,
4375 {SanitizerKind::SO_CFIVCall, SanitizerKind::SO_CFINVCall,
4376 SanitizerKind::SO_CFIDerivedCast, SanitizerKind::SO_CFIUnrelatedCast,
4377 SanitizerKind::SO_CFIICall},
4378 CheckHandler);
4379 FunctionArgList Args;
4384 Args.push_back(&ArgData);
4385 Args.push_back(&ArgAddr);
4386
4387 const CGFunctionInfo &FI =
4388 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
4389
4390 llvm::Function *F = llvm::Function::Create(
4391 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
4392 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
4393
4394 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
4395 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
4396 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
4397
4398 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
4399 SourceLocation());
4400
4402
4403 // This function is not affected by NoSanitizeList. This function does
4404 // not have a source location, but "src:*" would still apply. Revert any
4405 // changes to SanOpts made in StartFunction.
4406 SanOpts = CGM.getLangOpts().Sanitize;
4407
4408 llvm::Value *Data =
4409 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
4410 CGM.getContext().VoidPtrTy, ArgData.getLocation());
4411 llvm::Value *Addr =
4412 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
4413 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
4414
4415 // Data == nullptr means the calling module has trap behaviour for this check.
4416 llvm::Value *DataIsNotNullPtr =
4417 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
4418 // TODO: since there is no data, we don't know the CheckKind, and therefore
4419 // cannot inspect CGM.getCodeGenOpts().SanitizeMergeHandlers. We default to
4420 // NoMerge = false. Users can disable merging by disabling optimization.
4421 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail,
4422 /*NoMerge=*/false);
4423
4424 llvm::StructType *SourceLocationTy =
4425 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
4426 llvm::StructType *CfiCheckFailDataTy =
4427 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
4428
4429 llvm::Value *V = Builder.CreateConstGEP2_32(
4430 CfiCheckFailDataTy, Builder.CreatePointerCast(Data, DefaultPtrTy), 0, 0);
4431
4432 Address CheckKindAddr(V, Int8Ty, getIntAlign());
4433 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
4434
4435 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
4436 CGM.getLLVMContext(),
4437 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
4438 llvm::Value *ValidVtable = Builder.CreateZExt(
4439 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
4440 {Addr, AllVtables}),
4441 IntPtrTy);
4442
4443 const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
4444 {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
4445 {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
4446 {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
4447 {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
4448 {CFITCK_ICall, SanitizerKind::SO_CFIICall}};
4449
4450 for (auto CheckKindOrdinalPair : CheckKinds) {
4451 int Kind = CheckKindOrdinalPair.first;
4452 SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
4453
4454 // TODO: we could apply SanitizerAnnotateDebugInfo(Ordinal) instead of
4455 // relying on the SanitizerScope with all CFI ordinals
4456
4457 llvm::Value *Cond =
4458 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
4459 if (CGM.getLangOpts().Sanitize.has(Ordinal))
4460 EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
4461 {}, {Data, Addr, ValidVtable});
4462 else
4463 // TODO: we can't rely on CGM.getCodeGenOpts().SanitizeMergeHandlers.
4464 // Although the compiler allows SanitizeMergeHandlers to be set
4465 // independently of CGM.getLangOpts().Sanitize, Driver/SanitizerArgs.cpp
4466 // requires that SanitizeMergeHandlers is a subset of Sanitize.
4467 EmitTrapCheck(Cond, CheckHandler, /*NoMerge=*/false);
4468 }
4469
4471 // The only reference to this function will be created during LTO link.
4472 // Make sure it survives until then.
4473 CGM.addUsedGlobal(F);
4474}
4475
4477 if (SanOpts.has(SanitizerKind::Unreachable)) {
4478 auto CheckOrdinal = SanitizerKind::SO_Unreachable;
4479 auto CheckHandler = SanitizerHandler::BuiltinUnreachable;
4480 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4481 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
4482 CheckOrdinal),
4483 CheckHandler, EmitCheckSourceLocation(Loc), {});
4484 }
4485 Builder.CreateUnreachable();
4486}
4487
4488void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
4489 SanitizerHandler CheckHandlerID,
4490 bool NoMerge, const TrapReason *TR) {
4491 llvm::BasicBlock *Cont = createBasicBlock("cont");
4492
4493 // If we're optimizing, collapse all calls to trap down to just one per
4494 // check-type per function to save on code size.
4495 if ((int)TrapBBs.size() <= CheckHandlerID)
4496 TrapBBs.resize(CheckHandlerID + 1);
4497
4498 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
4499
4500 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
4501 llvm::StringRef TrapMessage;
4502 llvm::StringRef TrapCategory;
4503 auto DebugTrapReasonKind = CGM.getCodeGenOpts().getSanitizeDebugTrapReasons();
4504 if (TR && !TR->isEmpty() &&
4505 DebugTrapReasonKind ==
4507 TrapMessage = TR->getMessage();
4508 TrapCategory = TR->getCategory();
4509 } else {
4510 TrapMessage = GetUBSanTrapForHandler(CheckHandlerID);
4511 TrapCategory = "Undefined Behavior Sanitizer";
4512 }
4513
4514 if (getDebugInfo() && !TrapMessage.empty() &&
4515 DebugTrapReasonKind !=
4517 TrapLocation) {
4518 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
4519 TrapLocation, TrapCategory, TrapMessage);
4520 }
4521
4522 NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
4523 (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
4524
4525 llvm::MDBuilder MDHelper(getLLVMContext());
4526 if (TrapBB && !NoMerge) {
4527 auto Call = TrapBB->begin();
4528 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
4529
4530 Call->applyMergedLocation(Call->getDebugLoc(), TrapLocation);
4531
4532 Builder.CreateCondBr(Checked, Cont, TrapBB,
4533 MDHelper.createLikelyBranchWeights());
4534 } else {
4535 TrapBB = createBasicBlock("trap");
4536 Builder.CreateCondBr(Checked, Cont, TrapBB,
4537 MDHelper.createLikelyBranchWeights());
4538 EmitBlock(TrapBB);
4539
4540 ApplyDebugLocation applyTrapDI(*this, TrapLocation);
4541
4542 llvm::CallInst *TrapCall;
4543 if (CGM.getCodeGenOpts().SanitizeTrapLoop)
4544 TrapCall =
4545 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::looptrap));
4546 else
4547 TrapCall = Builder.CreateCall(
4548 CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
4549 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
4550
4551 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4552 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4553 CGM.getCodeGenOpts().TrapFuncName);
4554 TrapCall->addFnAttr(A);
4555 }
4556 if (NoMerge)
4557 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4558 TrapCall->setDoesNotReturn();
4559 TrapCall->setDoesNotThrow();
4560 Builder.CreateUnreachable();
4561 }
4562
4563 EmitBlock(Cont);
4564}
4565
4566llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
4567 llvm::CallInst *TrapCall =
4568 Builder.CreateCall(CGM.getIntrinsic(IntrID));
4569
4570 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4571 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4572 CGM.getCodeGenOpts().TrapFuncName);
4573 TrapCall->addFnAttr(A);
4574 }
4575
4577 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4578 return TrapCall;
4579}
4580
4582 LValueBaseInfo *BaseInfo,
4583 TBAAAccessInfo *TBAAInfo) {
4584 assert(E->getType()->isArrayType() &&
4585 "Array to pointer decay must have array source type!");
4586
4587 // Expressions of array type can't be bitfields or vector elements.
4588 LValue LV = EmitLValue(E);
4589 Address Addr = LV.getAddress();
4590
4591 // If the array type was an incomplete type, we need to make sure
4592 // the decay ends up being the right type.
4593 llvm::Type *NewTy = ConvertType(E->getType());
4594 Addr = Addr.withElementType(NewTy);
4595
4596 // Note that VLA pointers are always decayed, so we don't need to do
4597 // anything here.
4598 if (!E->getType()->isVariableArrayType()) {
4599 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4600 "Expected pointer to array");
4601
4602 if (getLangOpts().EmitStructuredGEP) {
4603 // Array-to-pointer decay for an SGEP is a no-op as we don't do any
4604 // logical indexing. See #179951 for some additional context.
4605 auto *SGEP =
4606 Builder.CreateStructuredGEP(NewTy, Addr.emitRawPointer(*this), {});
4607 Addr = Address(SGEP, NewTy, Addr.getAlignment(), Addr.isKnownNonNull());
4608 } else {
4609 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4610 }
4611 }
4612
4613 // The result of this decay conversion points to an array element within the
4614 // base lvalue. However, since TBAA currently does not support representing
4615 // accesses to elements of member arrays, we conservatively represent accesses
4616 // to the pointee object as if it had no any base lvalue specified.
4617 // TODO: Support TBAA for member arrays.
4619 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
4620 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
4621
4622 return Addr.withElementType(ConvertTypeForMem(EltType));
4623}
4624
4625/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4626/// array to pointer, return the array subexpression.
4627static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4628 // If this isn't just an array->pointer decay, bail out.
4629 const auto *CE = dyn_cast<CastExpr>(E);
4630 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4631 return nullptr;
4632
4633 // If this is a decay from variable width array, bail out.
4634 const Expr *SubExpr = CE->getSubExpr();
4635 if (SubExpr->getType()->isVariableArrayType())
4636 return nullptr;
4637
4638 return SubExpr;
4639}
4640
4642 llvm::Type *elemType,
4643 llvm::Value *ptr,
4644 ArrayRef<llvm::Value*> indices,
4645 bool inbounds,
4646 bool signedIndices,
4647 SourceLocation loc,
4648 const llvm::Twine &name = "arrayidx") {
4649 if (inbounds && CGF.getLangOpts().EmitStructuredGEP)
4650 return CGF.Builder.CreateStructuredGEP(elemType, ptr, indices);
4651
4652 if (inbounds) {
4653 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
4655 name);
4656 } else {
4657 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
4658 }
4659}
4660
4663 llvm::Type *arrayType,
4664 llvm::Type *elementType, bool inbounds,
4665 bool signedIndices, SourceLocation loc,
4666 CharUnits align,
4667 const llvm::Twine &name = "arrayidx") {
4668 if (inbounds && CGF.getLangOpts().EmitStructuredGEP)
4669 return RawAddress(CGF.Builder.CreateStructuredGEP(arrayType,
4670 addr.emitRawPointer(CGF),
4671 indices.drop_front()),
4672 elementType, align);
4673
4674 if (inbounds) {
4675 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
4677 align, name);
4678 } else {
4679 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
4680 }
4681}
4682
4684 const VariableArrayType *vla) {
4685 QualType eltType;
4686 do {
4687 eltType = vla->getElementType();
4688 } while ((vla = ctx.getAsVariableArrayType(eltType)));
4689 return eltType;
4690}
4691
4693 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4694}
4695
4696static bool hasBPFPreserveStaticOffset(const Expr *E) {
4697 if (!E)
4698 return false;
4699 QualType PointeeType = E->getType()->getPointeeType();
4700 if (PointeeType.isNull())
4701 return false;
4702 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4703 return hasBPFPreserveStaticOffset(BaseDecl);
4704 return false;
4705}
4706
4707// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
4709 Address &Addr) {
4710 if (!CGF.getTarget().getTriple().isBPF())
4711 return Addr;
4712
4713 llvm::Function *Fn =
4714 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4715 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4716 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4717}
4718
4719/// Given an array base, check whether its member access belongs to a record
4720/// with preserve_access_index attribute or not.
4721static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4722 if (!ArrayBase || !CGF.getDebugInfo())
4723 return false;
4724
4725 // Only support base as either a MemberExpr or DeclRefExpr.
4726 // DeclRefExpr to cover cases like:
4727 // struct s { int a; int b[10]; };
4728 // struct s *p;
4729 // p[1].a
4730 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4731 // p->b[5] is a MemberExpr example.
4732 const Expr *E = ArrayBase->IgnoreImpCasts();
4733 if (const auto *ME = dyn_cast<MemberExpr>(E))
4734 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4735
4736 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4737 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4738 if (!VarDef)
4739 return false;
4740
4741 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4742 if (!PtrT)
4743 return false;
4744
4745 const auto *PointeeT = PtrT->getPointeeType()
4747 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4748 return RecT->getDecl()
4749 ->getMostRecentDecl()
4750 ->hasAttr<BPFPreserveAccessIndexAttr>();
4751 return false;
4752 }
4753
4754 return false;
4755}
4756
4759 QualType eltType, bool inbounds,
4760 bool signedIndices, SourceLocation loc,
4761 QualType *arrayType = nullptr,
4762 const Expr *Base = nullptr,
4763 const llvm::Twine &name = "arrayidx") {
4764 // All the indices except that last must be zero.
4765#ifndef NDEBUG
4766 for (auto *idx : indices.drop_back())
4767 assert(isa<llvm::ConstantInt>(idx) &&
4768 cast<llvm::ConstantInt>(idx)->isZero());
4769#endif
4770
4771 // Determine the element size of the statically-sized base. This is
4772 // the thing that the indices are expressed in terms of.
4773 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4774 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4775 }
4776
4777 // We can use that to compute the best alignment of the element.
4778 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4779 CharUnits eltAlign =
4780 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4781
4783 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4784
4785 llvm::Value *eltPtr;
4786 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4787 if (!LastIndex ||
4789 addr = emitArraySubscriptGEP(CGF, addr, indices,
4791 : nullptr,
4792 CGF.ConvertTypeForMem(eltType), inbounds,
4793 signedIndices, loc, eltAlign, name);
4794 return addr;
4795 } else {
4796 // Remember the original array subscript for bpf target
4797 unsigned idx = LastIndex->getZExtValue();
4798 llvm::DIType *DbgInfo = nullptr;
4799 if (arrayType)
4800 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4801 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4802 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4803 idx, DbgInfo);
4804 }
4805
4806 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4807}
4808
4809namespace {
4810
4811/// StructFieldAccess is a simple visitor class to grab the first l-value to
4812/// r-value cast Expr.
4813struct StructFieldAccess
4814 : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
4815 const Expr *VisitCastExpr(const CastExpr *E) {
4816 if (E->getCastKind() == CK_LValueToRValue)
4817 return E;
4818 return Visit(E->getSubExpr());
4819 }
4820 const Expr *VisitParenExpr(const ParenExpr *E) {
4821 return Visit(E->getSubExpr());
4822 }
4823};
4824
4825} // end anonymous namespace
4826
4827/// The offset of a field from the beginning of the record.
4829 const FieldDecl *Field, int64_t &Offset) {
4830 ASTContext &Ctx = CGF.getContext();
4831 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4832 unsigned FieldNo = 0;
4833
4834 for (const FieldDecl *FD : RD->fields()) {
4835 if (FD == Field) {
4836 Offset += Layout.getFieldOffset(FieldNo);
4837 return true;
4838 }
4839
4840 QualType Ty = FD->getType();
4841 if (Ty->isRecordType())
4842 if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
4843 Offset += Layout.getFieldOffset(FieldNo);
4844 return true;
4845 }
4846
4847 if (!RD->isUnion())
4848 ++FieldNo;
4849 }
4850
4851 return false;
4852}
4853
4854/// Returns the relative offset difference between \p FD1 and \p FD2.
4855/// \code
4856/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4857/// \endcode
4858/// Both fields must be within the same struct.
4859static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4860 const FieldDecl *FD1,
4861 const FieldDecl *FD2) {
4862 const RecordDecl *FD1OuterRec =
4864 const RecordDecl *FD2OuterRec =
4866
4867 if (FD1OuterRec != FD2OuterRec)
4868 // Fields must be within the same RecordDecl.
4869 return std::optional<int64_t>();
4870
4871 int64_t FD1Offset = 0;
4872 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4873 return std::optional<int64_t>();
4874
4875 int64_t FD2Offset = 0;
4876 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4877 return std::optional<int64_t>();
4878
4879 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4880}
4881
4882/// EmitCountedByBoundsChecking - If the array being accessed has a "counted_by"
4883/// attribute, generate bounds checking code. The "count" field is at the top
4884/// level of the struct or in an anonymous struct, that's also at the top level.
4885/// Future expansions may allow the "count" to reside at any place in the
4886/// struct, but the value of "counted_by" will be a "simple" path to the count,
4887/// i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4888/// similar to emit the correct GEP.
4890 const Expr *ArrayExpr, QualType ArrayType, Address ArrayInst,
4891 QualType IndexType, llvm::Value *IndexVal, bool Accessed,
4892 bool FlexibleArray) {
4893 const auto *ME = dyn_cast<MemberExpr>(ArrayExpr->IgnoreImpCasts());
4894 if (!ME || !ME->getMemberDecl()->getType()->isCountAttributedType())
4895 return;
4896
4897 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4898 getLangOpts().getStrictFlexArraysLevel();
4899 if (FlexibleArray &&
4900 !ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel))
4901 return;
4902
4903 const FieldDecl *FD = cast<FieldDecl>(ME->getMemberDecl());
4904 const FieldDecl *CountFD = FD->findCountedByField();
4905 if (!CountFD)
4906 return;
4907
4908 if (std::optional<int64_t> Diff =
4909 getOffsetDifferenceInBits(*this, CountFD, FD)) {
4910 if (!ArrayInst.isValid()) {
4911 // An invalid Address indicates we're checking a pointer array access.
4912 // Emit the checked L-Value here.
4913 LValue LV = EmitCheckedLValue(ArrayExpr, TCK_MemberAccess);
4914 ArrayInst = LV.getAddress();
4915 }
4916
4917 // FIXME: The 'static_cast' is necessary, otherwise the result turns into a
4918 // uint64_t, which messes things up if we have a negative offset difference.
4919 Diff = *Diff / static_cast<int64_t>(CGM.getContext().getCharWidth());
4920
4921 // Create a GEP with the byte offset between the counted object and the
4922 // count and use that to load the count value.
4923 ArrayInst = Builder.CreatePointerBitCastOrAddrSpaceCast(ArrayInst,
4924 Int8PtrTy, Int8Ty);
4925
4926 llvm::Type *BoundsType = ConvertType(CountFD->getType());
4927 llvm::Value *BoundsVal =
4928 Builder.CreateInBoundsGEP(Int8Ty, ArrayInst.emitRawPointer(*this),
4929 Builder.getInt32(*Diff), ".counted_by.gep");
4930 BoundsVal = Builder.CreateAlignedLoad(BoundsType, BoundsVal, getIntAlign(),
4931 ".counted_by.load");
4932
4933 // Now emit the bounds checking.
4934 EmitBoundsCheckImpl(ArrayExpr, ArrayType, IndexVal, IndexType, BoundsVal,
4935 CountFD->getType(), Accessed);
4936 }
4937}
4938
4940 bool Accessed) {
4941 // The index must always be an integer, which is not an aggregate. Emit it
4942 // in lexical order (this complexity is, sadly, required by C++17).
4943 llvm::Value *IdxPre =
4944 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4945 bool SignedIndices = false;
4946 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4947 auto *Idx = IdxPre;
4948 if (E->getLHS() != E->getIdx()) {
4949 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4950 Idx = EmitScalarExpr(E->getIdx());
4951 }
4952
4953 QualType IdxTy = E->getIdx()->getType();
4954 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4955 SignedIndices |= IdxSigned;
4956
4957 if (SanOpts.has(SanitizerKind::ArrayBounds))
4958 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4959
4960 // Extend or truncate the index type to 32 or 64-bits.
4961 if (Promote && Idx->getType() != IntPtrTy)
4962 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4963
4964 return Idx;
4965 };
4966 IdxPre = nullptr;
4967
4968 // If the base is a vector type, then we are forming a vector element lvalue
4969 // with this subscript.
4970 if (E->getBase()->getType()->isSubscriptableVectorType() &&
4972 // Emit the vector as an lvalue to get its address.
4973 LValue LHS = EmitLValue(E->getBase());
4974 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4975 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4976 return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
4977 LHS.getBaseInfo(), TBAAAccessInfo());
4978 }
4979
4980 // The HLSL runtime handles subscript expressions on global resource arrays
4981 // and objects with HLSL buffer layouts.
4982 if (getLangOpts().HLSL) {
4983 std::optional<LValue> LV;
4984 if (E->getType()->isHLSLResourceRecord() ||
4986 LV = CGM.getHLSLRuntime().emitResourceArraySubscriptExpr(E, *this);
4987 } else if (E->getType().getAddressSpace() == LangAS::hlsl_constant) {
4988 LV = CGM.getHLSLRuntime().emitBufferArraySubscriptExpr(E, *this,
4989 EmitIdxAfterBase);
4990 }
4991 if (LV.has_value())
4992 return *LV;
4993 }
4994
4995 // All the other cases basically behave like simple offsetting.
4996
4997 // Handle the extvector case we ignored above.
4999 LValue LV = EmitLValue(E->getBase());
5000 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
5002
5003 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
5004 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
5005 SignedIndices, E->getExprLoc());
5006 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
5007 CGM.getTBAAInfoForSubobject(LV, EltType));
5008 }
5009
5010 LValueBaseInfo EltBaseInfo;
5011 TBAAAccessInfo EltTBAAInfo;
5013 if (const VariableArrayType *vla =
5014 getContext().getAsVariableArrayType(E->getType())) {
5015 // The base must be a pointer, which is not an aggregate. Emit
5016 // it. It needs to be emitted first in case it's what captures
5017 // the VLA bounds.
5018 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
5019 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
5020
5021 // The element count here is the total number of non-VLA elements.
5022 llvm::Value *numElements = getVLASize(vla).NumElts;
5023
5024 // Effectively, the multiply by the VLA size is part of the GEP.
5025 // GEP indexes are signed, and scaling an index isn't permitted to
5026 // signed-overflow, so we use the same semantics for our explicit
5027 // multiply. We suppress this if overflow is not undefined behavior.
5028 if (getLangOpts().PointerOverflowDefined) {
5029 Idx = Builder.CreateMul(Idx, numElements);
5030 } else {
5031 Idx = Builder.CreateNSWMul(Idx, numElements);
5032 }
5033
5034 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
5035 !getLangOpts().PointerOverflowDefined,
5036 SignedIndices, E->getExprLoc());
5037
5038 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
5039 // Indexing over an interface, as in "NSString *P; P[4];"
5040
5041 // Emit the base pointer.
5042 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
5043 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
5044
5045 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
5046 llvm::Value *InterfaceSizeVal =
5047 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
5048
5049 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
5050
5051 // We don't necessarily build correct LLVM struct types for ObjC
5052 // interfaces, so we can't rely on GEP to do this scaling
5053 // correctly, so we need to cast to i8*. FIXME: is this actually
5054 // true? A lot of other things in the fragile ABI would break...
5055 llvm::Type *OrigBaseElemTy = Addr.getElementType();
5056
5057 // Do the GEP.
5058 CharUnits EltAlign =
5059 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
5060 llvm::Value *EltPtr =
5061 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
5062 ScaledIdx, false, SignedIndices, E->getExprLoc());
5063 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
5064 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
5065 // If this is A[i] where A is an array, the frontend will have decayed the
5066 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
5067 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
5068 // "gep x, i" here. Emit one "gep A, 0, i".
5069 assert(Array->getType()->isArrayType() &&
5070 "Array to pointer decay must have array source type!");
5071 LValue ArrayLV;
5072 // For simple multidimensional array indexing, set the 'accessed' flag for
5073 // better bounds-checking of the base expression.
5074 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
5075 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
5076 else
5077 ArrayLV = EmitLValue(Array);
5078 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
5079
5080 if (SanOpts.has(SanitizerKind::ArrayBounds))
5081 EmitCountedByBoundsChecking(Array, Array->getType(), ArrayLV.getAddress(),
5082 E->getIdx()->getType(), Idx, Accessed,
5083 /*FlexibleArray=*/true);
5084
5085 // Propagate the alignment from the array itself to the result.
5086 QualType arrayType = Array->getType();
5088 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
5089 E->getType(), !getLangOpts().PointerOverflowDefined, SignedIndices,
5090 E->getExprLoc(), &arrayType, E->getBase());
5091 EltBaseInfo = ArrayLV.getBaseInfo();
5092 if (!CGM.getCodeGenOpts().NewStructPathTBAA) {
5093 // Since CodeGenTBAA::getTypeInfoHelper only handles array types for
5094 // new struct path TBAA, we must a use a plain access.
5095 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
5096 } else if (ArrayLV.getTBAAInfo().isMayAlias()) {
5097 EltTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5098 } else if (ArrayLV.getTBAAInfo().isIncomplete()) {
5099 // The array element is complete, even if the array is not.
5100 EltTBAAInfo = CGM.getTBAAAccessInfo(E->getType());
5101 } else {
5102 // The TBAA access info from the array (base) lvalue is ordinary. We will
5103 // adapt it to create access info for the element.
5104 EltTBAAInfo = ArrayLV.getTBAAInfo();
5105
5106 // We retain the TBAA struct path (BaseType and Offset members) from the
5107 // array. In the TBAA representation, we map any array access to the
5108 // element at index 0, as the index is generally a runtime value. This
5109 // element has the same offset in the base type as the array itself.
5110 // If the array lvalue had no base type, there is no point trying to
5111 // generate one, since an array itself is not a valid base type.
5112
5113 // We also retain the access type from the base lvalue, but the access
5114 // size must be updated to the size of an individual element.
5115 EltTBAAInfo.Size =
5117 }
5118 } else {
5119 // The base must be a pointer; emit it with an estimate of its alignment.
5120 Address BaseAddr =
5121 EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
5122 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
5123 QualType ptrType = E->getBase()->getType();
5124 Addr = emitArraySubscriptGEP(*this, BaseAddr, Idx, E->getType(),
5125 !getLangOpts().PointerOverflowDefined,
5126 SignedIndices, E->getExprLoc(), &ptrType,
5127 E->getBase());
5128
5129 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
5130 StructFieldAccess Visitor;
5131 const Expr *Base = Visitor.Visit(E->getBase());
5132
5133 if (const auto *CE = dyn_cast_if_present<CastExpr>(Base);
5134 CE && CE->getCastKind() == CK_LValueToRValue)
5136 E->getIdx()->getType(), Idx, Accessed,
5137 /*FlexibleArray=*/false);
5138 }
5139 }
5140
5141 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
5142
5143 if (getLangOpts().ObjC &&
5144 getLangOpts().getGC() != LangOptions::NonGC) {
5147 }
5148 return LV;
5149}
5150
5152 llvm::Value *Idx = EmitScalarExpr(E);
5153 if (Idx->getType() == IntPtrTy)
5154 return Idx;
5155 bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
5156 return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
5157}
5158
5160 const MatrixSingleSubscriptExpr *E) {
5161 LValue Base = EmitLValue(E->getBase());
5162 llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
5163 return LValue::MakeMatrixRow(
5164 MaybeConvertMatrixAddress(Base.getAddress(), *this), RowIdx,
5165 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
5166}
5167
5169 assert(
5170 !E->isIncomplete() &&
5171 "incomplete matrix subscript expressions should be rejected during Sema");
5172 LValue Base = EmitLValue(E->getBase());
5173
5174 // Extend or truncate the index type to 32 or 64-bits if needed.
5175 llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
5176 llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());
5177 llvm::MatrixBuilder MB(Builder);
5178 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
5179 unsigned NumCols = MatrixTy->getNumColumns();
5180 unsigned NumRows = MatrixTy->getNumRows();
5181 bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
5183 llvm::Value *FinalIdx =
5184 MB.CreateIndex(RowIdx, ColIdx, NumRows, NumCols, IsMatrixRowMajor);
5185
5186 return LValue::MakeMatrixElt(
5187 MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
5188 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
5189}
5190
5192 LValueBaseInfo &BaseInfo,
5193 TBAAAccessInfo &TBAAInfo,
5194 QualType BaseTy, QualType ElTy,
5195 bool IsLowerBound) {
5196 LValue BaseLVal;
5197 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
5198 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
5199 if (BaseTy->isArrayType()) {
5200 Address Addr = BaseLVal.getAddress();
5201 BaseInfo = BaseLVal.getBaseInfo();
5202
5203 // If the array type was an incomplete type, we need to make sure
5204 // the decay ends up being the right type.
5205 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
5206 Addr = Addr.withElementType(NewTy);
5207
5208 // Note that VLA pointers are always decayed, so we don't need to do
5209 // anything here.
5210 if (!BaseTy->isVariableArrayType()) {
5211 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
5212 "Expected pointer to array");
5213 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
5214 }
5215
5216 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
5217 }
5218 LValueBaseInfo TypeBaseInfo;
5219 TBAAAccessInfo TypeTBAAInfo;
5220 CharUnits Align =
5221 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
5222 BaseInfo.mergeForCast(TypeBaseInfo);
5223 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
5224 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
5225 CGF.ConvertTypeForMem(ElTy), Align);
5226 }
5227 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
5228}
5229
5231 bool IsLowerBound) {
5232
5233 assert(!E->isOpenACCArraySection() &&
5234 "OpenACC Array section codegen not implemented");
5235
5237 QualType ResultExprTy;
5238 if (auto *AT = getContext().getAsArrayType(BaseTy))
5239 ResultExprTy = AT->getElementType();
5240 else
5241 ResultExprTy = BaseTy->getPointeeType();
5242 llvm::Value *Idx = nullptr;
5243 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
5244 // Requesting lower bound or upper bound, but without provided length and
5245 // without ':' symbol for the default length -> length = 1.
5246 // Idx = LowerBound ?: 0;
5247 if (auto *LowerBound = E->getLowerBound()) {
5248 Idx = Builder.CreateIntCast(
5249 EmitScalarExpr(LowerBound), IntPtrTy,
5250 LowerBound->getType()->hasSignedIntegerRepresentation());
5251 } else
5252 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
5253 } else {
5254 // Try to emit length or lower bound as constant. If this is possible, 1
5255 // is subtracted from constant length or lower bound. Otherwise, emit LLVM
5256 // IR (LB + Len) - 1.
5257 auto &C = CGM.getContext();
5258 auto *Length = E->getLength();
5259 llvm::APSInt ConstLength;
5260 if (Length) {
5261 // Idx = LowerBound + Length - 1;
5262 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
5263 ConstLength = CL->zextOrTrunc(PointerWidthInBits);
5264 Length = nullptr;
5265 }
5266 auto *LowerBound = E->getLowerBound();
5267 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
5268 if (LowerBound) {
5269 if (std::optional<llvm::APSInt> LB =
5270 LowerBound->getIntegerConstantExpr(C)) {
5271 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
5272 LowerBound = nullptr;
5273 }
5274 }
5275 if (!Length)
5276 --ConstLength;
5277 else if (!LowerBound)
5278 --ConstLowerBound;
5279
5280 if (Length || LowerBound) {
5281 auto *LowerBoundVal =
5282 LowerBound
5283 ? Builder.CreateIntCast(
5284 EmitScalarExpr(LowerBound), IntPtrTy,
5285 LowerBound->getType()->hasSignedIntegerRepresentation())
5286 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
5287 auto *LengthVal =
5288 Length
5289 ? Builder.CreateIntCast(
5290 EmitScalarExpr(Length), IntPtrTy,
5291 Length->getType()->hasSignedIntegerRepresentation())
5292 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
5293 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
5294 /*HasNUW=*/false,
5295 !getLangOpts().PointerOverflowDefined);
5296 if (Length && LowerBound) {
5297 Idx = Builder.CreateSub(
5298 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
5299 /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
5300 }
5301 } else
5302 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
5303 } else {
5304 // Idx = ArraySize - 1;
5305 QualType ArrayTy = BaseTy->isPointerType()
5307 : BaseTy;
5308 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
5309 Length = VAT->getSizeExpr();
5310 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
5311 ConstLength = *L;
5312 Length = nullptr;
5313 }
5314 } else {
5315 auto *CAT = C.getAsConstantArrayType(ArrayTy);
5316 assert(CAT && "unexpected type for array initializer");
5317 ConstLength = CAT->getSize();
5318 }
5319 if (Length) {
5320 auto *LengthVal = Builder.CreateIntCast(
5321 EmitScalarExpr(Length), IntPtrTy,
5322 Length->getType()->hasSignedIntegerRepresentation());
5323 Idx = Builder.CreateSub(
5324 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
5325 /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
5326 } else {
5327 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
5328 --ConstLength;
5329 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
5330 }
5331 }
5332 }
5333 assert(Idx);
5334
5335 Address EltPtr = Address::invalid();
5336 LValueBaseInfo BaseInfo;
5337 TBAAAccessInfo TBAAInfo;
5338 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
5339 // The base must be a pointer, which is not an aggregate. Emit
5340 // it. It needs to be emitted first in case it's what captures
5341 // the VLA bounds.
5342 Address Base =
5343 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
5344 BaseTy, VLA->getElementType(), IsLowerBound);
5345 // The element count here is the total number of non-VLA elements.
5346 llvm::Value *NumElements = getVLASize(VLA).NumElts;
5347
5348 // Effectively, the multiply by the VLA size is part of the GEP.
5349 // GEP indexes are signed, and scaling an index isn't permitted to
5350 // signed-overflow, so we use the same semantics for our explicit
5351 // multiply. We suppress this if overflow is not undefined behavior.
5352 if (getLangOpts().PointerOverflowDefined)
5353 Idx = Builder.CreateMul(Idx, NumElements);
5354 else
5355 Idx = Builder.CreateNSWMul(Idx, NumElements);
5356 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
5357 !getLangOpts().PointerOverflowDefined,
5358 /*signedIndices=*/false, E->getExprLoc());
5359 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
5360 // If this is A[i] where A is an array, the frontend will have decayed the
5361 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
5362 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
5363 // "gep x, i" here. Emit one "gep A, 0, i".
5364 assert(Array->getType()->isArrayType() &&
5365 "Array to pointer decay must have array source type!");
5366 LValue ArrayLV;
5367 // For simple multidimensional array indexing, set the 'accessed' flag for
5368 // better bounds-checking of the base expression.
5369 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
5370 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
5371 else
5372 ArrayLV = EmitLValue(Array);
5373
5374 // Propagate the alignment from the array itself to the result.
5375 EltPtr = emitArraySubscriptGEP(
5376 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
5377 ResultExprTy, !getLangOpts().PointerOverflowDefined,
5378 /*signedIndices=*/false, E->getExprLoc());
5379 BaseInfo = ArrayLV.getBaseInfo();
5380 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
5381 } else {
5382 Address Base =
5383 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
5384 ResultExprTy, IsLowerBound);
5385 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
5386 !getLangOpts().PointerOverflowDefined,
5387 /*signedIndices=*/false, E->getExprLoc());
5388 }
5389
5390 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
5391}
5392
5395 // Emit the base vector as an l-value.
5396 LValue Base;
5397
5398 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
5399 if (E->isArrow()) {
5400 // If it is a pointer to a vector, emit the address and form an lvalue with
5401 // it.
5402 LValueBaseInfo BaseInfo;
5403 TBAAAccessInfo TBAAInfo;
5404 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
5405 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
5406 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
5407 Base.getQuals().removeObjCGCAttr();
5408 } else if (E->getBase()->isGLValue()) {
5409 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
5410 // emit the base as an lvalue.
5411 assert(E->getBase()->getType()->isVectorType());
5412 Base = EmitLValue(E->getBase());
5413 } else {
5414 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
5415 assert(E->getBase()->getType()->isVectorType() &&
5416 "Result must be a vector");
5417 llvm::Value *Vec = EmitScalarExpr(E->getBase());
5418
5419 // Store the vector to memory (because LValue wants an address).
5420 Address VecMem = CreateMemTemp(E->getBase()->getType());
5421 // need to zero extend an hlsl boolean vector to store it back to memory
5422 QualType Ty = E->getBase()->getType();
5423 llvm::Type *LTy = convertTypeForLoadStore(Ty, Vec->getType());
5424 if (LTy->getScalarSizeInBits() > Vec->getType()->getScalarSizeInBits())
5425 Vec = Builder.CreateZExt(Vec, LTy);
5426 Builder.CreateStore(Vec, VecMem);
5428 }
5429
5430 QualType type =
5431 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
5432
5433 // Encode the element access list into a vector of unsigned indices.
5435 E->getEncodedElementAccess(Indices);
5436
5437 if (Base.isSimple()) {
5438 llvm::Constant *CV =
5439 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
5440 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
5441 Base.getBaseInfo(), TBAAAccessInfo());
5442 }
5443
5444 if (Base.isMatrixRow()) {
5445 if (auto *RowIdx =
5446 llvm::dyn_cast<llvm::ConstantInt>(Base.getMatrixRowIdx())) {
5448 QualType MatTy = Base.getType();
5449 const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();
5450 unsigned NumCols = Indices.size();
5451 unsigned NumRows = MT->getNumRows();
5452 unsigned Row = RowIdx->getZExtValue();
5453 QualType VecQT = E->getBase()->getType();
5454 if (NumCols != MT->getNumColumns()) {
5455 const auto *EVT = VecQT->getAs<ExtVectorType>();
5456 QualType ElemQT = EVT->getElementType();
5457 VecQT = getContext().getExtVectorType(ElemQT, NumCols);
5458 }
5459 for (unsigned C = 0; C < NumCols; ++C) {
5460 unsigned Col = Indices[C];
5461 unsigned Linear = Col * NumRows + Row;
5462 MatIndices.push_back(llvm::ConstantInt::get(Int32Ty, Linear));
5463 }
5464
5465 llvm::Constant *ConstIdxs = llvm::ConstantVector::get(MatIndices);
5466 return LValue::MakeExtVectorElt(Base.getMatrixAddress(), ConstIdxs, VecQT,
5467 Base.getBaseInfo(), TBAAAccessInfo());
5468 }
5469 llvm::Constant *Cols =
5470 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
5471 // Note: intentionally not using E.getType() so we can reuse isMatrixRow()
5472 // implementations in EmitLoadOfLValue & EmitStoreThroughLValue and don't
5473 // need the LValue to have its own number of rows and columns when the
5474 // type is a vector.
5476 Base.getMatrixAddress(), Base.getMatrixRowIdx(), Cols, Base.getType(),
5477 Base.getBaseInfo(), TBAAAccessInfo());
5478 }
5479
5480 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
5481
5482 llvm::Constant *BaseElts = Base.getExtVectorElts();
5484
5485 for (unsigned Index : Indices)
5486 CElts.push_back(BaseElts->getAggregateElement(Index));
5487 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
5488 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
5489 Base.getBaseInfo(), TBAAAccessInfo());
5490}
5491
5493 const Expr *UnderlyingBaseExpr = E->IgnoreParens();
5494 while (auto *BaseMemberExpr = dyn_cast<MemberExpr>(UnderlyingBaseExpr))
5495 UnderlyingBaseExpr = BaseMemberExpr->getBase()->IgnoreParens();
5496 return getContext().isSentinelNullExpr(UnderlyingBaseExpr);
5497}
5498
5500 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
5502 return EmitDeclRefLValue(DRE);
5503 }
5504 if (getLangOpts().HLSL &&
5506 // We have an HLSL buffer - emit using HLSL's layout rules.
5507 return CGM.getHLSLRuntime().emitBufferMemberExpr(*this, E);
5508 }
5509
5510 Expr *BaseExpr = E->getBase();
5511 // Check whether the underlying base pointer is a constant null.
5512 // If so, we do not set inbounds flag for GEP to avoid breaking some
5513 // old-style offsetof idioms.
5514 bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
5516 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
5517 LValue BaseLV;
5518 if (E->isArrow()) {
5519 LValueBaseInfo BaseInfo;
5520 TBAAAccessInfo TBAAInfo;
5521 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
5522 QualType PtrTy = BaseExpr->getType()->getPointeeType();
5523 SanitizerSet SkippedChecks;
5524 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
5525 if (IsBaseCXXThis)
5526 SkippedChecks.set(SanitizerKind::Alignment, true);
5527 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
5528 SkippedChecks.set(SanitizerKind::Null, true);
5530 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
5531 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
5532 } else
5533 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
5534
5535 NamedDecl *ND = E->getMemberDecl();
5536 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
5537 LValue LV = EmitLValueForField(BaseLV, Field, IsInBounds);
5539 if (getLangOpts().OpenMP) {
5540 // If the member was explicitly marked as nontemporal, mark it as
5541 // nontemporal. If the base lvalue is marked as nontemporal, mark access
5542 // to children as nontemporal too.
5543 if ((IsWrappedCXXThis(BaseExpr) &&
5544 CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
5545 BaseLV.isNontemporal())
5546 LV.setNontemporal(/*Value=*/true);
5547 }
5548 return LV;
5549 }
5550
5551 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
5552 return EmitFunctionDeclLValue(*this, E, FD);
5553
5554 llvm_unreachable("Unhandled member declaration!");
5555}
5556
5557/// Given that we are currently emitting a lambda, emit an l-value for
5558/// one of its members.
5559///
5561 llvm::Value *ThisValue) {
5562 bool HasExplicitObjectParameter = false;
5563 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
5564 if (MD) {
5565 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
5566 assert(MD->getParent()->isLambda());
5567 assert(MD->getParent() == Field->getParent());
5568 }
5569 LValue LambdaLV;
5570 if (HasExplicitObjectParameter) {
5571 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
5572 auto It = LocalDeclMap.find(D);
5573 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
5574 Address AddrOfExplicitObject = It->getSecond();
5575 if (D->getType()->isReferenceType())
5576 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
5578 else
5579 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
5581
5582 // Make sure we have an lvalue to the lambda itself and not a derived class.
5583 auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
5584 auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
5585 if (ThisTy != LambdaTy) {
5586 const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
5588 LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
5589 BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
5591 LambdaLV = MakeAddrLValue(Base, T);
5592 }
5593 } else {
5594 CanQualType LambdaTagType =
5595 getContext().getCanonicalTagType(Field->getParent());
5596 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
5597 }
5598 return EmitLValueForField(LambdaLV, Field);
5599}
5600
5602 return EmitLValueForLambdaField(Field, CXXABIThisValue);
5603}
5604
5605/// Get the field index in the debug info. The debug info structure/union
5606/// will ignore the unnamed bitfields.
5608 unsigned FieldIndex) {
5609 unsigned I = 0, Skipped = 0;
5610
5611 for (auto *F : Rec->getDefinition()->fields()) {
5612 if (I == FieldIndex)
5613 break;
5614 if (F->isUnnamedBitField())
5615 Skipped++;
5616 I++;
5617 }
5618
5619 return FieldIndex - Skipped;
5620}
5621
5622/// Get the address of a zero-sized field within a record. The resulting
5623/// address doesn't necessarily have the right type.
5625 const FieldDecl *Field,
5626 bool IsInBounds) {
5628 CGF.getContext().getFieldOffset(Field));
5629 if (Offset.isZero())
5630 return Base;
5631 Base = Base.withElementType(CGF.Int8Ty);
5632 if (!IsInBounds)
5633 return CGF.Builder.CreateConstByteGEP(Base, Offset);
5634 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
5635}
5636
5637/// Drill down to the storage of a field without walking into reference types,
5638/// and without respect for pointer field protection.
5639///
5640/// The resulting address doesn't necessarily have the right type.
5642 const FieldDecl *field,
5643 bool IsInBounds) {
5644 if (isEmptyFieldForLayout(CGF.getContext(), field))
5645 return emitAddrOfZeroSizeField(CGF, base, field, IsInBounds);
5646
5647 const RecordDecl *rec = field->getParent();
5648
5649 unsigned idx =
5650 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5651 llvm::Type *StructType =
5653
5654 if (CGF.getLangOpts().EmitStructuredGEP)
5655 return RawAddress(
5656 CGF.Builder.CreateStructuredGEP(StructType, base.emitRawPointer(CGF),
5657 {CGF.Builder.getSize(idx)}),
5658 base.getElementType(), base.getAlignment());
5659
5660 if (!IsInBounds)
5661 return CGF.Builder.CreateConstGEP2_32(base, 0, idx, field->getName());
5662
5663 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
5664}
5665
5666/// Drill down to the storage of a field without walking into reference types,
5667/// wrapping the address in an llvm.protected.field.ptr intrinsic for the
5668/// pointer field protection feature if necessary.
5669///
5670/// The resulting address doesn't necessarily have the right type.
5672 const FieldDecl *field, bool IsInBounds) {
5673 Address Addr = emitRawAddrOfFieldStorage(CGF, base, field, IsInBounds);
5674
5675 if (!CGF.getContext().isPFPField(field))
5676 return Addr;
5677
5678 return CGF.EmitAddressOfPFPField(base, Addr, field);
5679}
5680
5682 Address addr, const FieldDecl *field) {
5683 const RecordDecl *rec = field->getParent();
5684 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
5685 base.getType(), rec->getLocation());
5686
5687 unsigned idx =
5688 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5689
5691 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
5692}
5693
5694static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
5695 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
5696 if (!RD)
5697 return false;
5698
5699 if (RD->isDynamicClass())
5700 return true;
5701
5702 for (const auto &Base : RD->bases())
5703 if (hasAnyVptr(Base.getType(), Context))
5704 return true;
5705
5706 for (const FieldDecl *Field : RD->fields())
5707 if (hasAnyVptr(Field->getType(), Context))
5708 return true;
5709
5710 return false;
5711}
5712
5714 bool IsInBounds) {
5715 LValueBaseInfo BaseInfo = base.getBaseInfo();
5716
5717 if (field->isBitField()) {
5718 const CGRecordLayout &RL =
5719 CGM.getTypes().getCGRecordLayout(field->getParent());
5720 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
5721 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
5722 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
5723 Info.VolatileStorageSize != 0 &&
5724 field->getType()
5727 Address Addr = base.getAddress();
5728 unsigned Idx = RL.getLLVMFieldNo(field);
5729 const RecordDecl *rec = field->getParent();
5732 if (!UseVolatile) {
5733 if (!IsInPreservedAIRegion &&
5734 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5735 if (Idx != 0) {
5736 // For structs, we GEP to the field that the record layout suggests.
5737 if (!IsInBounds)
5738 Addr = Builder.CreateConstGEP2_32(Addr, 0, Idx, field->getName());
5739 else
5740 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
5741 }
5742 } else {
5743 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
5744 getContext().getCanonicalTagType(rec), rec->getLocation());
5745 Addr = Builder.CreatePreserveStructAccessIndex(
5746 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
5747 DbgInfo);
5748 }
5749 }
5750 const unsigned SS =
5751 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
5752 // Get the access type.
5753 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
5754 Addr = Addr.withElementType(FieldIntTy);
5755 if (UseVolatile) {
5756 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
5757 if (VolatileOffset)
5758 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
5759 }
5760
5761 QualType fieldType =
5762 field->getType().withCVRQualifiers(base.getVRQualifiers());
5763 // TODO: Support TBAA for bit fields.
5764 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
5765 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
5766 TBAAAccessInfo());
5767 }
5768
5769 // Fields of may-alias structures are may-alias themselves.
5770 // FIXME: this should get propagated down through anonymous structs
5771 // and unions.
5772 QualType FieldType = field->getType();
5773 const RecordDecl *rec = field->getParent();
5774 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
5775 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
5776 TBAAAccessInfo FieldTBAAInfo;
5777 if (base.getTBAAInfo().isMayAlias() ||
5778 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
5779 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5780 } else if (rec->isUnion()) {
5781 // TODO: Support TBAA for unions.
5782 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5783 } else {
5784 // If no base type been assigned for the base access, then try to generate
5785 // one for this base lvalue.
5786 FieldTBAAInfo = base.getTBAAInfo();
5787 if (!FieldTBAAInfo.BaseType) {
5788 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
5789 assert(!FieldTBAAInfo.Offset &&
5790 "Nonzero offset for an access with no base type!");
5791 }
5792
5793 // Adjust offset to be relative to the base type.
5794 const ASTRecordLayout &Layout =
5796 unsigned CharWidth = getContext().getCharWidth();
5797 if (FieldTBAAInfo.BaseType)
5798 FieldTBAAInfo.Offset +=
5799 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
5800
5801 // Update the final access type and size.
5802 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
5803 FieldTBAAInfo.Size =
5805 }
5806
5807 Address addr = base.getAddress();
5809 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
5810 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
5811 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5812 ClassDef->isDynamicClass()) {
5813 // Getting to any field of dynamic object requires stripping dynamic
5814 // information provided by invariant.group. This is because accessing
5815 // fields may leak the real address of dynamic object, which could result
5816 // in miscompilation when leaked pointer would be compared.
5817 auto *stripped =
5818 Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
5819 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
5820 }
5821 }
5822
5823 unsigned RecordCVR = base.getVRQualifiers();
5824 if (rec->isUnion()) {
5825 // For unions, there is no pointer adjustment.
5826 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5827 hasAnyVptr(FieldType, getContext()))
5828 // Because unions can easily skip invariant.barriers, we need to add
5829 // a barrier every time CXXRecord field with vptr is referenced.
5830 addr = Builder.CreateLaunderInvariantGroup(addr);
5831
5833 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5834 // Remember the original union field index
5835 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
5836 rec->getLocation());
5837 addr =
5838 Address(Builder.CreatePreserveUnionAccessIndex(
5839 addr.emitRawPointer(*this),
5840 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
5841 addr.getElementType(), addr.getAlignment());
5842 }
5843
5844 if (FieldType->isReferenceType())
5845 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5846 } else {
5847 if (!IsInPreservedAIRegion &&
5848 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
5849 // For structs, we GEP to the field that the record layout suggests.
5850 addr = emitAddrOfFieldStorage(*this, addr, field, IsInBounds);
5851 else
5852 // Remember the original struct field index
5853 addr = emitPreserveStructAccess(*this, base, addr, field);
5854 }
5855
5856 // If this is a reference field, load the reference right now.
5857 if (FieldType->isReferenceType()) {
5858 LValue RefLVal =
5859 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5860 if (RecordCVR & Qualifiers::Volatile)
5861 RefLVal.getQuals().addVolatile();
5862 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
5863
5864 // Qualifiers on the struct don't apply to the referencee.
5865 RecordCVR = 0;
5866 FieldType = FieldType->getPointeeType();
5867 }
5868
5869 // Make sure that the address is pointing to the right type. This is critical
5870 // for both unions and structs.
5871 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5872
5873 if (field->hasAttr<AnnotateAttr>())
5874 addr = EmitFieldAnnotations(field, addr);
5875
5876 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5877 LV.getQuals().addCVRQualifiers(RecordCVR);
5878
5879 // __weak attribute on a field is ignored.
5882
5883 return LV;
5884}
5885
5886LValue
5888 const FieldDecl *Field) {
5889 QualType FieldType = Field->getType();
5890
5891 if (!FieldType->isReferenceType())
5892 return EmitLValueForField(Base, Field);
5893
5895 *this, Base.getAddress(), Field,
5896 /*IsInBounds=*/!getLangOpts().PointerOverflowDefined);
5897
5898 // Make sure that the address is pointing to the right type.
5899 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
5900 V = V.withElementType(llvmType);
5901
5902 // TODO: Generate TBAA information that describes this access as a structure
5903 // member access and not just an access to an object of the field's type. This
5904 // should be similar to what we do in EmitLValueForField().
5905 LValueBaseInfo BaseInfo = Base.getBaseInfo();
5906 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
5907 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
5908 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
5909 CGM.getTBAAInfoForSubobject(Base, FieldType));
5910}
5911
5913 if (E->isFileScope()) {
5914 ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
5915 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
5916 }
5917 if (E->getType()->isVariablyModifiedType())
5918 // make sure to emit the VLA size.
5920
5921 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
5922 const Expr *InitExpr = E->getInitializer();
5924
5925 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
5926 /*Init*/ true);
5927
5928 // Block-scope compound literals are destroyed at the end of the enclosing
5929 // scope in C.
5930 if (!getLangOpts().CPlusPlus)
5933 E->getType(), getDestroyer(DtorKind),
5934 DtorKind & EHCleanup);
5935
5936 return Result;
5937}
5938
5940 if (!E->isGLValue())
5941 // Initializing an aggregate temporary in C++11: T{...}.
5942 return EmitAggExprToLValue(E);
5943
5944 // An lvalue initializer list must be initializing a reference.
5945 assert(E->isTransparent() && "non-transparent glvalue init list");
5946 return EmitLValue(E->getInit(0));
5947}
5948
5949/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5950/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5951/// LValue is returned and the current block has been terminated.
5952static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5953 const Expr *Operand) {
5954 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5955 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5956 return std::nullopt;
5957 }
5958
5959 return CGF.EmitLValue(Operand);
5960}
5961
5962namespace {
5963// Handle the case where the condition is a constant evaluatable simple integer,
5964// which means we don't have to separately handle the true/false blocks.
5965std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5966 CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
5967 const Expr *condExpr = E->getCond();
5968 bool CondExprBool;
5969 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5970 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5971 if (!CondExprBool)
5972 std::swap(Live, Dead);
5973
5974 if (!CGF.ContainsLabel(Dead)) {
5975 // If the true case is live, we need to track its region.
5976 CGF.incrementProfileCounter(CondExprBool ? CGF.UseExecPath
5977 : CGF.UseSkipPath,
5978 E, /*UseBoth=*/true);
5979 CGF.markStmtMaybeUsed(Dead);
5980 // If a throw expression we emit it and return an undefined lvalue
5981 // because it can't be used.
5982 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
5983 CGF.EmitCXXThrowExpr(ThrowExpr);
5984 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
5985 llvm::Type *Ty = CGF.DefaultPtrTy;
5986 return CGF.MakeAddrLValue(
5987 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
5988 Dead->getType());
5989 }
5990 return CGF.EmitLValue(Live);
5991 }
5992 }
5993 return std::nullopt;
5994}
5995struct ConditionalInfo {
5996 llvm::BasicBlock *lhsBlock, *rhsBlock;
5997 std::optional<LValue> LHS, RHS;
5998};
5999
6000// Create and generate the 3 blocks for a conditional operator.
6001// Leaves the 'current block' in the continuation basic block.
6002template<typename FuncTy>
6003ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
6004 const AbstractConditionalOperator *E,
6005 const FuncTy &BranchGenFunc) {
6006 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
6007 CGF.createBasicBlock("cond.false"), std::nullopt,
6008 std::nullopt};
6009 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
6010
6012 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
6013 CGF.getProfileCount(E));
6014
6015 // Any temporaries created here are conditional.
6016 CGF.EmitBlock(Info.lhsBlock);
6018 eval.begin(CGF);
6019 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
6020 eval.end(CGF);
6021 Info.lhsBlock = CGF.Builder.GetInsertBlock();
6022
6023 if (Info.LHS)
6024 CGF.Builder.CreateBr(endBlock);
6025
6026 // Any temporaries created here are conditional.
6027 CGF.EmitBlock(Info.rhsBlock);
6029 eval.begin(CGF);
6030 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
6031 eval.end(CGF);
6032 Info.rhsBlock = CGF.Builder.GetInsertBlock();
6033 CGF.EmitBlock(endBlock);
6034
6035 return Info;
6036}
6037} // namespace
6038
6040 const AbstractConditionalOperator *E) {
6041 if (!E->isGLValue()) {
6042 // ?: here should be an aggregate.
6043 assert(hasAggregateEvaluationKind(E->getType()) &&
6044 "Unexpected conditional operator!");
6045 return (void)EmitAggExprToLValue(E);
6046 }
6047
6048 OpaqueValueMapping binding(*this, E);
6049 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
6050 return;
6051
6052 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
6053 CGF.EmitIgnoredExpr(E);
6054 return LValue{};
6055 });
6056}
6059 if (!expr->isGLValue()) {
6060 // ?: here should be an aggregate.
6061 assert(hasAggregateEvaluationKind(expr->getType()) &&
6062 "Unexpected conditional operator!");
6063 return EmitAggExprToLValue(expr);
6064 }
6065
6066 OpaqueValueMapping binding(*this, expr);
6067 if (std::optional<LValue> Res =
6068 HandleConditionalOperatorLValueSimpleCase(*this, expr))
6069 return *Res;
6070
6071 ConditionalInfo Info = EmitConditionalBlocks(
6072 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
6073 return EmitLValueOrThrowExpression(CGF, E);
6074 });
6075
6076 if ((Info.LHS && !Info.LHS->isSimple()) ||
6077 (Info.RHS && !Info.RHS->isSimple()))
6078 return EmitUnsupportedLValue(expr, "conditional operator");
6079
6080 if (Info.LHS && Info.RHS) {
6081 Address lhsAddr = Info.LHS->getAddress();
6082 Address rhsAddr = Info.RHS->getAddress();
6084 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
6085 Builder.GetInsertBlock(), expr->getType());
6086 AlignmentSource alignSource =
6087 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
6088 Info.RHS->getBaseInfo().getAlignmentSource());
6089 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
6090 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
6091 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
6092 TBAAInfo);
6093 } else {
6094 assert((Info.LHS || Info.RHS) &&
6095 "both operands of glvalue conditional are throw-expressions?");
6096 return Info.LHS ? *Info.LHS : *Info.RHS;
6097 }
6098}
6099
6100/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
6101/// type. If the cast is to a reference, we can have the usual lvalue result,
6102/// otherwise if a cast is needed by the code generator in an lvalue context,
6103/// then it must mean that we need the address of an aggregate in order to
6104/// access one of its members. This can happen for all the reasons that casts
6105/// are permitted with aggregate result, including noop aggregate casts, and
6106/// cast from scalar to union.
6108 llvm::scope_exit RestoreCurCast([this, Prev = CurCast] { CurCast = Prev; });
6109 CurCast = E;
6110 switch (E->getCastKind()) {
6111 case CK_ToVoid:
6112 case CK_BitCast:
6113 case CK_LValueToRValueBitCast:
6114 case CK_ArrayToPointerDecay:
6115 case CK_FunctionToPointerDecay:
6116 case CK_NullToMemberPointer:
6117 case CK_NullToPointer:
6118 case CK_IntegralToPointer:
6119 case CK_PointerToIntegral:
6120 case CK_PointerToBoolean:
6121 case CK_IntegralCast:
6122 case CK_BooleanToSignedIntegral:
6123 case CK_IntegralToBoolean:
6124 case CK_IntegralToFloating:
6125 case CK_FloatingToIntegral:
6126 case CK_FloatingToBoolean:
6127 case CK_FloatingCast:
6128 case CK_FloatingRealToComplex:
6129 case CK_FloatingComplexToReal:
6130 case CK_FloatingComplexToBoolean:
6131 case CK_FloatingComplexCast:
6132 case CK_FloatingComplexToIntegralComplex:
6133 case CK_IntegralRealToComplex:
6134 case CK_IntegralComplexToReal:
6135 case CK_IntegralComplexToBoolean:
6136 case CK_IntegralComplexCast:
6137 case CK_IntegralComplexToFloatingComplex:
6138 case CK_DerivedToBaseMemberPointer:
6139 case CK_BaseToDerivedMemberPointer:
6140 case CK_MemberPointerToBoolean:
6141 case CK_ReinterpretMemberPointer:
6142 case CK_AnyPointerToBlockPointerCast:
6143 case CK_ARCProduceObject:
6144 case CK_ARCConsumeObject:
6145 case CK_ARCReclaimReturnedObject:
6146 case CK_ARCExtendBlockObject:
6147 case CK_CopyAndAutoreleaseBlockObject:
6148 case CK_IntToOCLSampler:
6149 case CK_FloatingToFixedPoint:
6150 case CK_FixedPointToFloating:
6151 case CK_FixedPointCast:
6152 case CK_FixedPointToBoolean:
6153 case CK_FixedPointToIntegral:
6154 case CK_IntegralToFixedPoint:
6155 case CK_MatrixCast:
6156 case CK_HLSLVectorTruncation:
6157 case CK_HLSLMatrixTruncation:
6158 case CK_HLSLArrayRValue:
6159 case CK_HLSLElementwiseCast:
6160 case CK_HLSLAggregateSplatCast:
6161 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
6162
6163 case CK_Dependent:
6164 llvm_unreachable("dependent cast kind in IR gen!");
6165
6166 case CK_BuiltinFnToFnPtr:
6167 llvm_unreachable("builtin functions are handled elsewhere");
6168
6169 // These are never l-values; just use the aggregate emission code.
6170 case CK_NonAtomicToAtomic:
6171 case CK_AtomicToNonAtomic:
6172 return EmitAggExprToLValue(E);
6173
6174 case CK_Dynamic: {
6175 LValue LV = EmitLValue(E->getSubExpr());
6176 Address V = LV.getAddress();
6177 const auto *DCE = cast<CXXDynamicCastExpr>(E);
6179 }
6180
6181 case CK_ConstructorConversion:
6182 case CK_UserDefinedConversion:
6183 case CK_CPointerToObjCPointerCast:
6184 case CK_BlockPointerToObjCPointerCast:
6185 case CK_LValueToRValue:
6186 return EmitLValue(E->getSubExpr());
6187
6188 case CK_NoOp: {
6189 // CK_NoOp can model a qualification conversion, which can remove an array
6190 // bound and change the IR type.
6191 // FIXME: Once pointee types are removed from IR, remove this.
6192 LValue LV = EmitLValue(E->getSubExpr());
6193 // Propagate the volatile qualifer to LValue, if exist in E.
6195 LV.getQuals() = E->getType().getQualifiers();
6196 if (LV.isSimple()) {
6197 Address V = LV.getAddress();
6198 if (V.isValid()) {
6199 llvm::Type *T = ConvertTypeForMem(E->getType());
6200 if (V.getElementType() != T)
6201 LV.setAddress(V.withElementType(T));
6202 }
6203 }
6204 return LV;
6205 }
6206
6207 case CK_UncheckedDerivedToBase:
6208 case CK_DerivedToBase: {
6209 auto *DerivedClassDecl = E->getSubExpr()->getType()->castAsCXXRecordDecl();
6210 LValue LV = EmitLValue(E->getSubExpr());
6211 Address This = LV.getAddress();
6212
6213 // Perform the derived-to-base conversion
6215 This, DerivedClassDecl, E->path_begin(), E->path_end(),
6216 /*NullCheckValue=*/false, E->getExprLoc());
6217
6218 // TODO: Support accesses to members of base classes in TBAA. For now, we
6219 // conservatively pretend that the complete object is of the base class
6220 // type.
6221 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
6222 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6223 }
6224 case CK_ToUnion:
6225 return EmitAggExprToLValue(E);
6226 case CK_BaseToDerived: {
6227 auto *DerivedClassDecl = E->getType()->castAsCXXRecordDecl();
6228 LValue LV = EmitLValue(E->getSubExpr());
6229
6230 // Perform the base-to-derived conversion
6232 LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
6233 /*NullCheckValue=*/false);
6234
6235 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
6236 // performed and the object is not of the derived type.
6239 E->getType());
6240
6241 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
6242 EmitVTablePtrCheckForCast(E->getType(), Derived,
6243 /*MayBeNull=*/false, CFITCK_DerivedCast,
6244 E->getBeginLoc());
6245
6246 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
6247 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6248 }
6249 case CK_LValueBitCast: {
6250 // This must be a reinterpret_cast (or c-style equivalent).
6251 const auto *CE = cast<ExplicitCastExpr>(E);
6252
6253 CGM.EmitExplicitCastExprType(CE, this);
6254 LValue LV = EmitLValue(E->getSubExpr());
6256 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
6257
6258 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
6260 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
6261 E->getBeginLoc());
6262
6263 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
6264 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6265 }
6266 case CK_AddressSpaceConversion: {
6267 LValue LV = EmitLValue(E->getSubExpr());
6268 QualType DestTy = getContext().getPointerType(E->getType());
6269 llvm::Value *V =
6270 performAddrSpaceCast(LV.getPointer(*this), ConvertType(DestTy));
6272 LV.getAddress().getAlignment()),
6273 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
6274 }
6275 case CK_ObjCObjectLValueCast: {
6276 LValue LV = EmitLValue(E->getSubExpr());
6278 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
6279 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6280 }
6281 case CK_ZeroToOCLOpaqueType:
6282 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
6283
6284 case CK_VectorSplat: {
6285 // LValue results of vector splats are only supported in HLSL.
6286 if (!getLangOpts().HLSL)
6287 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
6288 return EmitLValue(E->getSubExpr());
6289 }
6290 }
6291
6292 llvm_unreachable("Unhandled lvalue cast kind?");
6293}
6294
6299
6300std::pair<LValue, LValue>
6302 // Emitting the casted temporary through an opaque value.
6303 LValue BaseLV = EmitLValue(E->getArgLValue());
6305
6306 QualType ExprTy = E->getType();
6307 Address OutTemp = CreateIRTempWithoutCast(ExprTy);
6308 LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);
6309
6310 if (E->isInOut())
6312 TempLV);
6313
6315 return std::make_pair(BaseLV, TempLV);
6316}
6317
6319 CallArgList &Args, QualType Ty) {
6320
6321 auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
6322
6323 llvm::Value *Addr = TempLV.getAddress().getBasePointer();
6324 llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
6325
6327
6328 Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
6329 Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast());
6330 Args.add(RValue::get(TmpAddr, *this), Ty);
6331 return TempLV;
6332}
6333
6334LValue
6337
6338 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
6339 it = OpaqueLValues.find(e);
6340
6341 if (it != OpaqueLValues.end())
6342 return it->second;
6343
6344 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
6345 return EmitLValue(e->getSourceExpr());
6346}
6347
6348RValue
6351
6352 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
6353 it = OpaqueRValues.find(e);
6354
6355 if (it != OpaqueRValues.end())
6356 return it->second;
6357
6358 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
6359 return EmitAnyExpr(e->getSourceExpr());
6360}
6361
6364 return OpaqueLValues.contains(E);
6365 return OpaqueRValues.contains(E);
6366}
6367
6369 const FieldDecl *FD,
6370 SourceLocation Loc) {
6371 QualType FT = FD->getType();
6372 LValue FieldLV = EmitLValueForField(LV, FD);
6373 switch (getEvaluationKind(FT)) {
6374 case TEK_Complex:
6375 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
6376 case TEK_Aggregate:
6377 return FieldLV.asAggregateRValue();
6378 case TEK_Scalar:
6379 // This routine is used to load fields one-by-one to perform a copy, so
6380 // don't load reference fields.
6381 if (FD->getType()->isReferenceType())
6382 return RValue::get(FieldLV.getPointer(*this));
6383 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
6384 // primitive load.
6385 if (FieldLV.isBitField())
6386 return EmitLoadOfLValue(FieldLV, Loc);
6387 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
6388 }
6389 llvm_unreachable("bad evaluation kind");
6390}
6391
6392//===--------------------------------------------------------------------===//
6393// Expression Emission
6394//===--------------------------------------------------------------------===//
6395
6398 llvm::CallBase **CallOrInvoke) {
6399 llvm::CallBase *CallOrInvokeStorage;
6400 if (!CallOrInvoke) {
6401 CallOrInvoke = &CallOrInvokeStorage;
6402 }
6403
6404 llvm::scope_exit AddCoroElideSafeOnExit([&] {
6405 if (E->isCoroElideSafe()) {
6406 auto *I = *CallOrInvoke;
6407 if (I)
6408 I->addFnAttr(llvm::Attribute::CoroElideSafe);
6409 }
6410 });
6411
6412 // Builtins never have block type.
6413 if (E->getCallee()->getType()->isBlockPointerType())
6414 return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
6415
6416 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
6417 return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);
6418
6419 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
6420 return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);
6421
6422 // A CXXOperatorCallExpr is created even for explicit object methods, but
6423 // these should be treated like static function call.
6424 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
6425 if (const auto *MD =
6426 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
6427 MD && MD->isImplicitObjectMemberFunction())
6428 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);
6429
6430 CGCallee callee = EmitCallee(E->getCallee());
6431
6432 if (callee.isBuiltin()) {
6433 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
6434 E, ReturnValue);
6435 }
6436
6437 if (callee.isPseudoDestructor()) {
6439 }
6440
6441 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
6442 /*Chain=*/nullptr, CallOrInvoke);
6443}
6444
6445/// Emit a CallExpr without considering whether it might be a subclass.
6448 llvm::CallBase **CallOrInvoke) {
6449 CGCallee Callee = EmitCallee(E->getCallee());
6450 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
6451 /*Chain=*/nullptr, CallOrInvoke);
6452}
6453
6454// Detect the unusual situation where an inline version is shadowed by a
6455// non-inline version. In that case we should pick the external one
6456// everywhere. That's GCC behavior too.
6458 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
6459 if (!PD->isInlineBuiltinDeclaration())
6460 return false;
6461 return true;
6462}
6463
6465 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
6466
6467 if (auto builtinID = FD->getBuiltinID()) {
6468 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
6469 std::string NoBuiltins = "no-builtins";
6470
6471 StringRef Ident = CGF.CGM.getMangledName(GD);
6472 std::string FDInlineName = (Ident + ".inline").str();
6473
6474 bool IsPredefinedLibFunction =
6476 bool HasAttributeNoBuiltin =
6477 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
6478 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
6479
6480 // When directing calling an inline builtin, call it through it's mangled
6481 // name to make it clear it's not the actual builtin.
6482 if (CGF.CurFn->getName() != FDInlineName &&
6484 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
6485 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
6486 llvm::Module *M = Fn->getParent();
6487 llvm::Function *Clone = M->getFunction(FDInlineName);
6488 if (!Clone) {
6489 Clone = llvm::Function::Create(Fn->getFunctionType(),
6490 llvm::GlobalValue::InternalLinkage,
6491 Fn->getAddressSpace(), FDInlineName, M);
6492 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
6493 }
6494 return CGCallee::forDirect(Clone, GD);
6495 }
6496
6497 // Replaceable builtins provide their own implementation of a builtin. If we
6498 // are in an inline builtin implementation, avoid trivial infinite
6499 // recursion. Honor __attribute__((no_builtin("foo"))) or
6500 // __attribute__((no_builtin)) on the current function unless foo is
6501 // not a predefined library function which means we must generate the
6502 // builtin no matter what.
6503 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
6504 return CGCallee::forBuiltin(builtinID, FD);
6505 }
6506
6507 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
6508 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
6509 FD->hasAttr<CUDAGlobalAttr>())
6510 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
6511 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
6512
6513 return CGCallee::forDirect(CalleePtr, GD);
6514}
6515
6517 if (DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
6519 return GlobalDecl(FD);
6520}
6521
6523 E = E->IgnoreParens();
6524
6525 // Look through function-to-pointer decay.
6526 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
6527 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
6528 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
6529 return EmitCallee(ICE->getSubExpr());
6530 }
6531
6532 // Try to remember the original __ptrauth qualifier for loads of
6533 // function pointers.
6534 if (ICE->getCastKind() == CK_LValueToRValue) {
6535 const Expr *SubExpr = ICE->getSubExpr();
6536 if (const auto *PtrType = SubExpr->getType()->getAs<PointerType>()) {
6537 std::pair<llvm::Value *, CGPointerAuthInfo> Result =
6539
6541 assert(FunctionType->isFunctionType());
6542
6543 GlobalDecl GD;
6544 if (const auto *VD =
6545 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee())) {
6546 GD = GlobalDecl(VD);
6547 }
6549 CGCallee Callee(CalleeInfo, Result.first, Result.second);
6550 return Callee;
6551 }
6552 }
6553
6554 // Resolve direct calls.
6555 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
6556 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
6558 }
6559 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
6560 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
6561 EmitIgnoredExpr(ME->getBase());
6562 return EmitDirectCallee(*this, FD);
6563 }
6564
6565 // Look through template substitutions.
6566 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
6567 return EmitCallee(NTTP->getReplacement());
6568
6569 // Treat pseudo-destructor calls differently.
6570 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
6572 }
6573
6574 // Otherwise, we have an indirect reference.
6575 llvm::Value *calleePtr;
6577 if (auto ptrType = E->getType()->getAs<PointerType>()) {
6578 calleePtr = EmitScalarExpr(E);
6579 functionType = ptrType->getPointeeType();
6580 } else {
6581 functionType = E->getType();
6582 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
6583 }
6584 assert(functionType->isFunctionType());
6585
6586 GlobalDecl GD;
6587 if (const auto *VD =
6588 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
6589 GD = GlobalDecl(VD);
6590
6591 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
6592 CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
6593 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
6594 return callee;
6595}
6596
6598 // Comma expressions just emit their LHS then their RHS as an l-value.
6599 if (E->getOpcode() == BO_Comma) {
6600 EmitIgnoredExpr(E->getLHS());
6602 return EmitLValue(E->getRHS());
6603 }
6604
6605 if (E->getOpcode() == BO_PtrMemD ||
6606 E->getOpcode() == BO_PtrMemI)
6608
6609 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
6610
6611 // Create a Key Instructions source location atom group that covers both
6612 // LHS and RHS expressions. Nested RHS expressions may get subsequently
6613 // separately grouped (1 below):
6614 //
6615 // 1. `a = b = c` -> Two atoms.
6616 // 2. `x = new(1)` -> One atom (for both addr store and value store).
6617 // 3. Complex and agg assignment -> One atom.
6619
6620 // Note that in all of these cases, __block variables need the RHS
6621 // evaluated first just in case the variable gets moved by the RHS.
6622
6623 switch (getEvaluationKind(E->getType())) {
6624 case TEK_Scalar: {
6625 if (PointerAuthQualifier PtrAuth =
6626 E->getLHS()->getType().getPointerAuth()) {
6628 LValue CopiedLV = LV;
6629 CopiedLV.getQuals().removePointerAuth();
6630 llvm::Value *RV =
6631 EmitPointerAuthQualify(PtrAuth, E->getRHS(), CopiedLV.getAddress());
6632 EmitNullabilityCheck(CopiedLV, RV, E->getExprLoc());
6633 EmitStoreThroughLValue(RValue::get(RV), CopiedLV);
6634 return LV;
6635 }
6636
6637 switch (E->getLHS()->getType().getObjCLifetime()) {
6639 return EmitARCStoreStrong(E, /*ignored*/ false).first;
6640
6642 return EmitARCStoreAutoreleasing(E).first;
6643
6644 // No reason to do any of these differently.
6648 break;
6649 }
6650
6651 // TODO: Can we de-duplicate this code with the corresponding code in
6652 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
6653 RValue RV;
6654 llvm::Value *Previous = nullptr;
6655 QualType SrcType = E->getRHS()->getType();
6656 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
6657 // we want to extract that value and potentially (if the bitfield sanitizer
6658 // is enabled) use it to check for an implicit conversion.
6659 if (E->getLHS()->refersToBitField()) {
6660 llvm::Value *RHS =
6662 RV = RValue::get(RHS);
6663 } else
6664 RV = EmitAnyExpr(E->getRHS());
6665
6667
6668 if (RV.isScalar())
6670
6671 if (LV.isBitField()) {
6672 llvm::Value *Result = nullptr;
6673 // If bitfield sanitizers are enabled we want to use the result
6674 // to check whether a truncation or sign change has occurred.
6675 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
6677 else
6679
6680 // If the expression contained an implicit conversion, make sure
6681 // to use the value before the scalar conversion.
6682 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
6683 QualType DstType = E->getLHS()->getType();
6684 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
6685 LV.getBitFieldInfo(), E->getExprLoc());
6686 } else
6687 EmitStoreThroughLValue(RV, LV);
6688
6689 if (getLangOpts().OpenMP)
6690 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
6691 E->getLHS());
6692 return LV;
6693 }
6694
6695 case TEK_Complex:
6697
6698 case TEK_Aggregate:
6699 // If the lang opt is HLSL and the LHS is a constant array
6700 // then we are performing a copy assignment and call a special
6701 // function because EmitAggExprToLValue emits to a temporary LValue
6703 return EmitHLSLArrayAssignLValue(E);
6704
6705 return EmitAggExprToLValue(E);
6706 }
6707 llvm_unreachable("bad evaluation kind");
6708}
6709
6710// This function implements trivial copy assignment for HLSL's
6711// assignable constant arrays.
6713 // Don't emit an LValue for the RHS because it might not be an LValue
6714 LValue LHS = EmitLValue(E->getLHS());
6715
6716 // If the RHS is a global resource array, copy all individual resources
6717 // into LHS.
6719 if (CGM.getHLSLRuntime().emitResourceArrayCopy(LHS, E->getRHS(), *this))
6720 return LHS;
6721
6722 // In C the RHS of an assignment operator is an RValue.
6723 // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
6724 // EmitInitializationToLValue to emit an RValue into an LValue.
6726 return LHS;
6727}
6728
6730 llvm::CallBase **CallOrInvoke) {
6731 RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);
6732
6733 if (!RV.isScalar())
6734 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6736
6737 assert(E->getCallReturnType(getContext())->isReferenceType() &&
6738 "Can't have a scalar return unless the return type is a "
6739 "reference type!");
6740
6742}
6743
6745 // FIXME: This shouldn't require another copy.
6746 return EmitAggExprToLValue(E);
6747}
6748
6751 && "binding l-value to type which needs a temporary");
6752 AggValueSlot Slot = CreateAggTemp(E->getType());
6753 EmitCXXConstructExpr(E, Slot);
6755}
6756
6757LValue
6761
6763 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
6764 .withElementType(ConvertType(E->getType()));
6765}
6766
6771
6772LValue
6780
6783
6784 if (!RV.isScalar())
6785 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6787
6788 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
6789 "Can't have a scalar return unless the return type is a "
6790 "reference type!");
6791
6793}
6794
6796 Address V =
6797 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
6799}
6800
6802 const ObjCIvarDecl *Ivar) {
6803 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
6804}
6805
6806llvm::Value *
6808 const ObjCIvarDecl *Ivar) {
6809 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
6810 QualType PointerDiffType = getContext().getPointerDiffType();
6811 return Builder.CreateZExtOrTrunc(OffsetValue,
6812 getTypes().ConvertType(PointerDiffType));
6813}
6814
6816 llvm::Value *BaseValue,
6817 const ObjCIvarDecl *Ivar,
6818 unsigned CVRQualifiers) {
6819 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
6820 Ivar, CVRQualifiers);
6821}
6822
6824 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
6825 llvm::Value *BaseValue = nullptr;
6826 const Expr *BaseExpr = E->getBase();
6827 Qualifiers BaseQuals;
6828 QualType ObjectTy;
6829 if (E->isArrow()) {
6830 BaseValue = EmitScalarExpr(BaseExpr);
6831 ObjectTy = BaseExpr->getType()->getPointeeType();
6832 BaseQuals = ObjectTy.getQualifiers();
6833 } else {
6834 LValue BaseLV = EmitLValue(BaseExpr);
6835 BaseValue = BaseLV.getPointer(*this);
6836 ObjectTy = BaseExpr->getType();
6837 BaseQuals = ObjectTy.getQualifiers();
6838 }
6839
6840 LValue LV =
6841 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
6842 BaseQuals.getCVRQualifiers());
6844 return LV;
6845}
6846
6848 // Can only get l-value for message expression returning aggregate type
6849 RValue RV = EmitAnyExprToTemp(E);
6850 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6852}
6853
6855 const CGCallee &OrigCallee, const CallExpr *E,
6857 llvm::Value *Chain,
6858 llvm::CallBase **CallOrInvoke,
6859 CGFunctionInfo const **ResolvedFnInfo) {
6860 // Get the actual function type. The callee type will always be a pointer to
6861 // function type or a block pointer type.
6862 assert(CalleeType->isFunctionPointerType() &&
6863 "Call must have function pointer type!");
6864
6865 const Decl *TargetDecl =
6866 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
6867
6868 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
6869 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
6870 "trying to emit a call to an immediate function");
6871
6872 CalleeType = getContext().getCanonicalType(CalleeType);
6873
6874 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
6875
6876 CGCallee Callee = OrigCallee;
6877
6878 bool CFIUnchecked = CalleeType->hasPointeeToCFIUncheckedCalleeFunctionType();
6879
6880 if (SanOpts.has(SanitizerKind::Function) &&
6881 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
6882 !isa<FunctionNoProtoType>(PointeeType) && !CFIUnchecked) {
6883 if (llvm::Constant *PrefixSig =
6884 CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
6885 auto CheckOrdinal = SanitizerKind::SO_Function;
6886 auto CheckHandler = SanitizerHandler::FunctionTypeMismatch;
6887 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6888 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
6889
6890 llvm::Type *PrefixSigType = PrefixSig->getType();
6891 llvm::StructType *PrefixStructTy = llvm::StructType::get(
6892 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
6893
6894 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6895 if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
6896 // Use raw pointer since we are using the callee pointer as data here.
6897 Address Addr =
6898 Address(CalleePtr, CalleePtr->getType(),
6900 CalleePtr->getPointerAlignment(CGM.getDataLayout())),
6901 Callee.getPointerAuthInfo(), nullptr);
6902 CalleePtr = Addr.emitRawPointer(*this);
6903 }
6904
6905 // On 32-bit Arm, the low bit of a function pointer indicates whether
6906 // it's using the Arm or Thumb instruction set. The actual first
6907 // instruction lives at the same address either way, so we must clear
6908 // that low bit before using the function address to find the prefix
6909 // structure.
6910 //
6911 // This applies to both Arm and Thumb target triples, because
6912 // either one could be used in an interworking context where it
6913 // might be passed function pointers of both types.
6914 llvm::Value *AlignedCalleePtr;
6915 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
6916 AlignedCalleePtr = Builder.CreateIntrinsic(
6917 CalleePtr->getType(), llvm::Intrinsic::ptrmask,
6918 {CalleePtr, llvm::ConstantInt::getSigned(IntPtrTy, ~1)});
6919 } else {
6920 AlignedCalleePtr = CalleePtr;
6921 }
6922
6923 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
6924 llvm::Value *CalleeSigPtr =
6925 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
6926 llvm::Value *CalleeSig =
6927 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
6928 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
6929
6930 llvm::BasicBlock *Cont = createBasicBlock("cont");
6931 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
6932 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
6933
6934 EmitBlock(TypeCheck);
6935 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
6936 Int32Ty,
6937 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
6938 getPointerAlign());
6939 llvm::Value *CalleeTypeHashMatch =
6940 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
6941 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
6942 EmitCheckTypeDescriptor(CalleeType)};
6943 EmitCheck(std::make_pair(CalleeTypeHashMatch, CheckOrdinal), CheckHandler,
6944 StaticData, {CalleePtr});
6945
6946 Builder.CreateBr(Cont);
6947 EmitBlock(Cont);
6948 }
6949 }
6950
6951 const auto *FnType = cast<FunctionType>(PointeeType);
6952
6953 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
6954 FD && DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
6955 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FnType);
6956
6957 // If we are checking indirect calls and this call is indirect, check that the
6958 // function pointer is a member of the bit set for the function type.
6959 if (SanOpts.has(SanitizerKind::CFIICall) &&
6960 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) && !CFIUnchecked) {
6961 auto CheckOrdinal = SanitizerKind::SO_CFIICall;
6962 auto CheckHandler = SanitizerHandler::CFICheckFail;
6963 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6964 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
6965
6966 llvm::Metadata *MD =
6967 CGM.CreateMetadataIdentifierForFnType(QualType(FnType, 0));
6968
6969 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
6970
6971 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6972 llvm::Value *TypeTest = Builder.CreateCall(
6973 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
6974
6975 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
6976 llvm::Constant *StaticData[] = {
6977 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
6980 };
6981 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
6982 EmitCfiSlowPathCheck(CheckOrdinal, TypeTest, CrossDsoTypeId, CalleePtr,
6983 StaticData);
6984 } else {
6985 EmitCheck(std::make_pair(TypeTest, CheckOrdinal), CheckHandler,
6986 StaticData, {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
6987 }
6988 }
6989
6990 CallArgList Args;
6991 if (Chain)
6992 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
6993
6994 // C++17 requires that we evaluate arguments to a call using assignment syntax
6995 // right-to-left, and that we evaluate arguments to certain other operators
6996 // left-to-right. Note that we allow this to override the order dictated by
6997 // the calling convention on the MS ABI, which means that parameter
6998 // destruction order is not necessarily reverse construction order.
6999 // FIXME: Revisit this based on C++ committee response to unimplementability.
7001 bool StaticOperator = false;
7002 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
7003 if (OCE->isAssignmentOp())
7005 else {
7006 switch (OCE->getOperator()) {
7007 case OO_LessLess:
7008 case OO_GreaterGreater:
7009 case OO_AmpAmp:
7010 case OO_PipePipe:
7011 case OO_Comma:
7012 case OO_ArrowStar:
7014 break;
7015 default:
7016 break;
7017 }
7018 }
7019
7020 if (const auto *MD =
7021 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
7022 MD && MD->isStatic())
7023 StaticOperator = true;
7024 }
7025
7026 auto Arguments = E->arguments();
7027 if (StaticOperator) {
7028 // If we're calling a static operator, we need to emit the object argument
7029 // and ignore it.
7030 EmitIgnoredExpr(E->getArg(0));
7031 Arguments = drop_begin(Arguments, 1);
7032 }
7033 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
7034 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
7035
7036 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
7037 Args, FnType, /*ChainCall=*/Chain);
7038
7039 if (ResolvedFnInfo)
7040 *ResolvedFnInfo = &FnInfo;
7041
7042 // HIP function pointer contains kernel handle when it is used in triple
7043 // chevron. The kernel stub needs to be loaded from kernel handle and used
7044 // as callee.
7045 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
7047 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
7048 llvm::Value *Handle = Callee.getFunctionPointer();
7049 auto *Stub = Builder.CreateLoad(
7050 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
7051 Callee.setFunctionPointer(Stub);
7052 }
7053
7054 // Insert function pointer lookup if this is a target call
7055 //
7056 // This is used for the indirect function case, virtual function case is
7057 // handled in ItaniumCXXABI.cpp
7058 if (getLangOpts().OpenMPIsTargetDevice && CGM.getTriple().isGPU() &&
7059 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
7060 const Expr *CalleeExpr = E->getCallee()->IgnoreParenImpCasts();
7061 const DeclRefExpr *DRE = nullptr;
7062 while (CalleeExpr) {
7063 if ((DRE = dyn_cast<DeclRefExpr>(CalleeExpr)))
7064 break;
7065 if (const auto *ME = dyn_cast<MemberExpr>(CalleeExpr))
7066 CalleeExpr = ME->getBase()->IgnoreParenImpCasts();
7067 else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(CalleeExpr))
7068 CalleeExpr = ASE->getBase()->IgnoreParenImpCasts();
7069 else
7070 break;
7071 }
7072
7073 const auto *VD = DRE ? dyn_cast<VarDecl>(DRE->getDecl()) : nullptr;
7074 if (VD && VD->hasAttr<OMPTargetIndirectCallAttr>()) {
7075 auto *PtrTy = CGM.VoidPtrTy;
7076 llvm::Type *RtlFnArgs[] = {PtrTy};
7077 llvm::FunctionCallee DeviceRtlFn = CGM.CreateRuntimeFunction(
7078 llvm::FunctionType::get(PtrTy, RtlFnArgs, false),
7079 "__llvm_omp_indirect_call_lookup");
7080 llvm::Value *Func = Callee.getFunctionPointer();
7081 llvm::Type *BackupTy = Func->getType();
7082 Func = Builder.CreatePointerBitCastOrAddrSpaceCast(Func, PtrTy);
7083 Func = EmitRuntimeCall(DeviceRtlFn, {Func});
7084 Func = Builder.CreatePointerBitCastOrAddrSpaceCast(Func, BackupTy);
7085 Callee.setFunctionPointer(Func);
7086 }
7087 }
7088
7089 llvm::CallBase *LocalCallOrInvoke = nullptr;
7090 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
7091 E == MustTailCall, E->getExprLoc());
7092
7093 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
7094 if (CalleeDecl->hasAttr<RestrictAttr>() ||
7095 CalleeDecl->hasAttr<MallocSpanAttr>() ||
7096 CalleeDecl->hasAttr<AllocSizeAttr>()) {
7097 // Function has 'malloc' (aka. 'restrict') or 'alloc_size' attribute.
7098 if (SanOpts.has(SanitizerKind::AllocToken)) {
7099 // Set !alloc_token metadata.
7100 EmitAllocToken(LocalCallOrInvoke, E);
7101 }
7102 }
7103 }
7104 if (CallOrInvoke)
7105 *CallOrInvoke = LocalCallOrInvoke;
7106
7107 return Call;
7108}
7109
7112 Address BaseAddr = Address::invalid();
7113 if (E->getOpcode() == BO_PtrMemI) {
7114 BaseAddr = EmitPointerWithAlignment(E->getLHS());
7115 } else {
7116 BaseAddr = EmitLValue(E->getLHS()).getAddress();
7117 }
7118
7119 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
7120 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
7121
7122 LValueBaseInfo BaseInfo;
7123 TBAAAccessInfo TBAAInfo;
7124 bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
7127 E, BaseAddr, OffsetV, MPT, IsInBounds, &BaseInfo, &TBAAInfo);
7128
7129 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
7130}
7131
7132/// Given the address of a temporary variable, produce an r-value of
7133/// its type.
7135 QualType type,
7136 SourceLocation loc) {
7138 switch (getEvaluationKind(type)) {
7139 case TEK_Complex:
7140 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
7141 case TEK_Aggregate:
7142 return lvalue.asAggregateRValue();
7143 case TEK_Scalar:
7144 return RValue::get(EmitLoadOfScalar(lvalue, loc));
7145 }
7146 llvm_unreachable("bad evaluation kind");
7147}
7148
7149void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
7150 assert(Val->getType()->isFPOrFPVectorTy());
7151 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
7152 return;
7153
7154 llvm::MDBuilder MDHelper(getLLVMContext());
7155 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
7156
7157 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
7158}
7159
7161 llvm::Type *EltTy = Val->getType()->getScalarType();
7162 if (!EltTy->isFloatTy() && !EltTy->isHalfTy())
7163 return;
7164
7165 if ((getLangOpts().OpenCL &&
7166 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
7167 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
7168 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
7169 // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3 ulp.
7170 // OpenCL v3.0 s7.4: minimum accuracy of half precision sqrt is 1.5 ulp.
7171 //
7172 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
7173 // build option allows an application to specify that single precision
7174 // floating-point divide (x/y and 1/x) and sqrt used in the program
7175 // source are correctly rounded.
7176 //
7177 // TODO: CUDA has a prec-sqrt flag
7178 SetFPAccuracy(Val, EltTy->isFloatTy() ? 3.0f : 1.5f);
7179 }
7180}
7181
7183 llvm::Type *EltTy = Val->getType()->getScalarType();
7184 if (!EltTy->isFloatTy() && !EltTy->isHalfTy())
7185 return;
7186
7187 if ((getLangOpts().OpenCL &&
7188 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
7189 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
7190 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
7191 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5 ulp.
7192 // OpenCL v3.0 s7.4: minimum accuracy of half precision / is 1 ulp.
7193 //
7194 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
7195 // build option allows an application to specify that single precision
7196 // floating-point divide (x/y and 1/x) and sqrt used in the program
7197 // source are correctly rounded.
7198 //
7199 // TODO: CUDA has a prec-div flag
7200 SetFPAccuracy(Val, EltTy->isFloatTy() ? 2.5f : 1.f);
7201 }
7202}
7203
7204namespace {
7205 struct LValueOrRValue {
7206 LValue LV;
7207 RValue RV;
7208 };
7209}
7210
7211static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
7212 const PseudoObjectExpr *E,
7213 bool forLValue,
7214 AggValueSlot slot) {
7216
7217 // Find the result expression, if any.
7218 const Expr *resultExpr = E->getResultExpr();
7219 LValueOrRValue result;
7220
7222 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
7223 const Expr *semantic = *i;
7224
7225 // If this semantic expression is an opaque value, bind it
7226 // to the result of its source expression.
7227 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
7228 // Skip unique OVEs.
7229 if (ov->isUnique()) {
7230 assert(ov != resultExpr &&
7231 "A unique OVE cannot be used as the result expression");
7232 continue;
7233 }
7234
7235 // If this is the result expression, we may need to evaluate
7236 // directly into the slot.
7238 OVMA opaqueData;
7239 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
7241 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
7242 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
7244 opaqueData = OVMA::bind(CGF, ov, LV);
7245 result.RV = slot.asRValue();
7246
7247 // Otherwise, emit as normal.
7248 } else {
7249 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
7250
7251 // If this is the result, also evaluate the result now.
7252 if (ov == resultExpr) {
7253 if (forLValue)
7254 result.LV = CGF.EmitLValue(ov);
7255 else
7256 result.RV = CGF.EmitAnyExpr(ov, slot);
7257 }
7258 }
7259
7260 opaques.push_back(opaqueData);
7261
7262 // Otherwise, if the expression is the result, evaluate it
7263 // and remember the result.
7264 } else if (semantic == resultExpr) {
7265 if (forLValue)
7266 result.LV = CGF.EmitLValue(semantic);
7267 else
7268 result.RV = CGF.EmitAnyExpr(semantic, slot);
7269
7270 // Otherwise, evaluate the expression in an ignored context.
7271 } else {
7272 CGF.EmitIgnoredExpr(semantic);
7273 }
7274 }
7275
7276 // Unbind all the opaques now.
7277 for (CodeGenFunction::OpaqueValueMappingData &opaque : opaques)
7278 opaque.unbind(CGF);
7279
7280 return result;
7281}
7282
7284 AggValueSlot slot) {
7285 return emitPseudoObjectExpr(*this, E, false, slot).RV;
7286}
7287
7291
7293 LValue Val, SmallVectorImpl<LValue> &AccessList) {
7294
7296 std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
7297 WorkList;
7298 llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32);
7299 WorkList.push_back({Val, Val.getType(), {llvm::ConstantInt::get(IdxTy, 0)}});
7300
7301 while (!WorkList.empty()) {
7302 auto [LVal, T, IdxList] = WorkList.pop_back_val();
7303 T = T.getCanonicalType().getUnqualifiedType();
7304 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) {
7305 uint64_t Size = CAT->getZExtSize();
7306 for (int64_t I = Size - 1; I > -1; I--) {
7307 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
7308 IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
7309 WorkList.emplace_back(LVal, CAT->getElementType(), IdxListCopy);
7310 }
7311 } else if (const auto *RT = dyn_cast<RecordType>(T)) {
7312 const RecordDecl *Record = RT->getDecl()->getDefinitionOrSelf();
7313 assert(!Record->isUnion() && "Union types not supported in flat cast.");
7314
7315 const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);
7316
7318 std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
7319 ReverseList;
7320 if (CXXD && CXXD->isStandardLayout())
7322
7323 // deal with potential base classes
7324 if (CXXD && !CXXD->isStandardLayout()) {
7325 if (CXXD->getNumBases() > 0) {
7326 assert(CXXD->getNumBases() == 1 &&
7327 "HLSL doesn't support multiple inheritance.");
7328 auto Base = CXXD->bases_begin();
7329 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
7330 IdxListCopy.push_back(llvm::ConstantInt::get(
7331 IdxTy, 0)); // base struct should be at index zero
7332 ReverseList.emplace_back(LVal, Base->getType(), IdxListCopy);
7333 }
7334 }
7335
7336 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(Record);
7337
7338 llvm::Type *LLVMT = ConvertTypeForMem(T);
7340 LValue RLValue;
7341 bool createdGEP = false;
7342 for (auto *FD : Record->fields()) {
7343 if (FD->isBitField()) {
7344 if (FD->isUnnamedBitField())
7345 continue;
7346 if (!createdGEP) {
7347 createdGEP = true;
7348 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
7349 LLVMT, Align, "gep");
7350 RLValue = MakeAddrLValue(GEP, T);
7351 }
7352 LValue FieldLVal = EmitLValueForField(RLValue, FD, true);
7353 ReverseList.push_back({FieldLVal, FD->getType(), {}});
7354 } else {
7355 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
7356 IdxListCopy.push_back(
7357 llvm::ConstantInt::get(IdxTy, Layout.getLLVMFieldNo(FD)));
7358 ReverseList.emplace_back(LVal, FD->getType(), IdxListCopy);
7359 }
7360 }
7361
7362 std::reverse(ReverseList.begin(), ReverseList.end());
7363 llvm::append_range(WorkList, ReverseList);
7364 } else if (const auto *VT = dyn_cast<VectorType>(T)) {
7365 llvm::Type *LLVMT = ConvertTypeForMem(T);
7367 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList, LLVMT,
7368 Align, "vector.gep");
7369 LValue Base = MakeAddrLValue(GEP, T);
7370 for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) {
7371 llvm::Constant *Idx = llvm::ConstantInt::get(IdxTy, I);
7372 LValue LV =
7373 LValue::MakeVectorElt(Base.getAddress(), Idx, VT->getElementType(),
7374 Base.getBaseInfo(), TBAAAccessInfo());
7375 AccessList.emplace_back(LV);
7376 }
7377 } else if (const auto *MT = dyn_cast<ConstantMatrixType>(T)) {
7378 // Matrices are represented as flat arrays in memory, but has a vector
7379 // value type. So we use ConvertMatrixAddress to convert the address from
7380 // array to vector, and extract elements similar to the vector case above.
7381 // The matrix elements are iterated over in row-major order regardless of
7382 // the memory layout of the matrix.
7383 llvm::Type *LLVMT = ConvertTypeForMem(T);
7385 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList, LLVMT,
7386 Align, "matrix.gep");
7387 LValue Base = MakeAddrLValue(GEP, T);
7388 Address MatAddr = MaybeConvertMatrixAddress(Base.getAddress(), *this);
7389 unsigned NumRows = MT->getNumRows();
7390 unsigned NumCols = MT->getNumColumns();
7391 bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
7393 llvm::MatrixBuilder MB(Builder);
7394 for (unsigned Row = 0; Row < MT->getNumRows(); Row++) {
7395 for (unsigned Col = 0; Col < MT->getNumColumns(); Col++) {
7396 llvm::Value *RowIdx = llvm::ConstantInt::get(IdxTy, Row);
7397 llvm::Value *ColIdx = llvm::ConstantInt::get(IdxTy, Col);
7398 llvm::Value *Idx = MB.CreateIndex(RowIdx, ColIdx, NumRows, NumCols,
7399 IsMatrixRowMajor);
7400 LValue LV =
7401 LValue::MakeMatrixElt(MatAddr, Idx, MT->getElementType(),
7402 Base.getBaseInfo(), TBAAAccessInfo());
7403 AccessList.emplace_back(LV);
7404 }
7405 }
7406 } else { // a scalar/builtin type
7407 if (!IdxList.empty()) {
7408 llvm::Type *LLVMT = ConvertTypeForMem(T);
7410 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
7411 LLVMT, Align, "gep");
7412 AccessList.emplace_back(MakeAddrLValue(GEP, T));
7413 } else // must be a bitfield we already created an lvalue for
7414 AccessList.emplace_back(LVal);
7415 }
7416 }
7417}
Defines the clang::ASTContext interface.
#define V(N, I)
This file provides some common utility functions for processing Lambda related AST Constructs.
Defines enum values for all the target-independent builtin functions.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV, bool IsMemberAccess=false)
Definition CGExpr.cpp:3214
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM)
Named Registers are named metadata pointing to the register name which will be read from/written to a...
Definition CGExpr.cpp:3483
static llvm::Value * emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc, llvm::Value *Ptr)
Definition CGExpr.cpp:720
static const Expr * isSimpleArrayDecayOperand(const Expr *E)
isSimpleArrayDecayOperand - If the specified expr is a simple decay from an array to pointer,...
Definition CGExpr.cpp:4627
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, int64_t &Offset)
The offset of a field from the beginning of the record.
Definition CGExpr.cpp:4828
static bool hasBPFPreserveStaticOffset(const RecordDecl *D)
Definition CGExpr.cpp:4692
ConstantEmissionKind
Can we constant-emit a load of a reference to a variable of the given type?
Definition CGExpr.cpp:1909
@ CEK_AsReferenceOnly
Definition CGExpr.cpp:1911
@ CEK_AsValueOnly
Definition CGExpr.cpp:1913
@ CEK_None
Definition CGExpr.cpp:1910
@ CEK_AsValueOrReference
Definition CGExpr.cpp:1912
static Address emitRawAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field, bool IsInBounds)
Drill down to the storage of a field without walking into reference types, and without respect for po...
Definition CGExpr.cpp:5641
static bool isConstantEmittableObjectType(QualType type)
Given an object of the given canonical type, can we safely copy a value out of it based on its initia...
Definition CGExpr.cpp:1882
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue)
Definition CGExpr.cpp:3471
static std::optional< LValue > EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand)
Emit the operand of a glvalue conditional operator.
Definition CGExpr.cpp:5952
static CheckRecoverableKind getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal)
Definition CGExpr.cpp:4063
static llvm::Value * emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Type *elemType, llvm::Value *ptr, ArrayRef< llvm::Value * > indices, bool inbounds, bool signedIndices, SourceLocation loc, const llvm::Twine &name="arrayidx")
Definition CGExpr.cpp:4641
SmallVector< llvm::Value *, 8 > RecIndicesTy
Definition CGExpr.cpp:1160
static GlobalDecl getGlobalDeclForDirectCall(const FunctionDecl *FD)
Definition CGExpr.cpp:6516
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, GlobalDecl GD)
Definition CGExpr.cpp:3458
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, CodeGenFunction &CGF, bool IsVector=true)
Definition CGExpr.cpp:2284
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, const PseudoObjectExpr *E, bool forLValue, AggValueSlot slot)
Definition CGExpr.cpp:7211
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, Address &Addr)
Definition CGExpr.cpp:4708
static llvm::StringRef GetUBSanTrapForHandler(SanitizerHandler ID)
Definition CGExpr.cpp:93
static llvm::Value * getArrayIndexingBound(CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel)
If Base is known to point to the start of an array, return the length of that array.
Definition CGExpr.cpp:1012
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, CodeGenFunction &CGF)
Definition CGExpr.cpp:2462
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type)
Definition CGExpr.cpp:1915
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, const FieldDecl *Field, bool IsInBounds)
Get the address of a zero-sized field within a record.
Definition CGExpr.cpp:5624
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field, bool IsInBounds)
Drill down to the storage of a field without walking into reference types, wrapping the address in an...
Definition CGExpr.cpp:5671
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool IsBool)
Definition CGExpr.cpp:2054
static std::optional< int64_t > getOffsetDifferenceInBits(CodeGenFunction &CGF, const FieldDecl *FD1, const FieldDecl *FD2)
Returns the relative offset difference between FD1 and FD2.
Definition CGExpr.cpp:4859
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD)
Definition CGExpr.cpp:6464
static LValue EmitThreadPrivateVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, llvm::Type *RealVarTy, SourceLocation Loc)
Definition CGExpr.cpp:3311
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, RecIndicesTy &Indices)
Definition CGExpr.cpp:1162
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD)
Definition CGExpr.cpp:6457
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, const Expr *E, const VarDecl *VD)
Definition CGExpr.cpp:3407
static bool hasAnyVptr(const QualType Type, const ASTContext &Context)
Definition CGExpr.cpp:5694
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase)
Given an array base, check whether its member access belongs to a record with preserve_access_index a...
Definition CGExpr.cpp:4721
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T)
Definition CGExpr.cpp:3325
VariableTypeDescriptorKind
Definition CGExpr.cpp:78
@ TK_Float
A floating-point type.
Definition CGExpr.cpp:82
@ TK_Unknown
Any other type. The value representation is unspecified.
Definition CGExpr.cpp:86
@ TK_Integer
An integer type.
Definition CGExpr.cpp:80
@ TK_BitInt
An _BitInt(N) type.
Definition CGExpr.cpp:84
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, bool isInit, CodeGenFunction &CGF)
Definition CGExpr.cpp:2383
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition CGExpr.cpp:1452
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, Address addr, const FieldDecl *field)
Definition CGExpr.cpp:5681
const SanitizerHandlerInfo SanitizerHandlers[]
Definition CGExpr.cpp:4080
static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef< llvm::Value * > FnArgs, SanitizerHandler CheckHandler, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB, bool NoMerge)
Definition CGExpr.cpp:4086
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, LValueBaseInfo &BaseInfo, TBAAAccessInfo &TBAAInfo, QualType BaseTy, QualType ElTy, bool IsLowerBound)
Definition CGExpr.cpp:5191
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
FormatToken * Previous
The previous token in the unwrapped line.
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
Defines the clang::Module class, which describes a module in the source code.
static const SanitizerMask AlwaysRecoverable
static const SanitizerMask Unrecoverable
#define LIST_SANITIZER_CHECKS
SanitizerHandler
Defines the SourceManager interface.
static QualType getPointeeType(const MemRegion *R)
a trap message and trap category.
const LValueBase getLValueBase() const
Definition APValue.cpp:1015
bool isLValue() const
Definition APValue.h:490
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
SourceManager & getSourceManager()
Definition ASTContext.h:858
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
bool isPFPField(const FieldDecl *Field) const
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
Builtin::Context & BuiltinInfo
Definition ASTContext.h:799
const LangOptions & getLangOpts() const
Definition ASTContext.h:951
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType BoolTy
llvm::DenseMap< const CXXMethodDecl *, CXXCastPath > LambdaCastPaths
For capturing lambdas with an explicit object parameter whose type is derived from the lambda type,...
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getExtVectorType(QualType VectorType, unsigned NumElts) const
Return the unique reference to an extended vector type of the specified element type and size.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
unsigned getTargetAddressSpace(LangAS AS) const
bool isSentinelNullExpr(const Expr *E)
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition Expr.h:7218
Expr * getBase()
Get base of the array section.
Definition Expr.h:7296
Expr * getLength()
Get length of array section.
Definition Expr.h:7306
static QualType getBaseOriginalType(const Expr *Base)
Return original type of the base expression for array section.
Definition Expr.cpp:5366
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:7335
Expr * getLowerBound()
Get lower bound of array section.
Definition Expr.h:7300
bool isOpenACCArraySection() const
Definition Expr.h:7293
SourceLocation getColonLocFirst() const
Definition Expr.h:7327
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2779
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2753
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3730
QualType getElementType() const
Definition TypeBase.h:3742
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
SourceLocation getExprLoc() const
Definition Expr.h:4082
Expr * getRHS() const
Definition Expr.h:4093
static bool isAdditiveOp(Opcode Opc)
Definition Expr.h:4127
Opcode getOpcode() const
Definition Expr.h:4086
A fixed int type of a specified bitwidth.
Definition TypeBase.h:8240
unsigned getNumBits() const
Definition TypeBase.h:8252
bool isPredefinedLibFunction(unsigned ID) const
Determines whether this builtin is a predefined libc/libm function, such as "malloc",...
Definition Builtins.h:320
Represents binding an expression to a temporary.
Definition ExprCXX.h:1494
CXXTemporary * getTemporary()
Definition ExprCXX.h:1512
const Expr * getSubExpr() const
Definition ExprCXX.h:1516
Represents a call to a C++ constructor.
Definition ExprCXX.h:1549
Represents a C++ destructor within a class.
Definition DeclCXX.h:2876
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1372
bool isStandardLayout() const
Determine whether this class is standard-layout per C++ [class]p7.
Definition DeclCXX.h:1225
unsigned getNumBases() const
Retrieves the number of base classes of this class.
Definition DeclCXX.h:602
base_class_iterator bases_begin()
Definition DeclCXX.h:615
bool isDynamicClass() const
Definition DeclCXX.h:574
bool hasDefinition() const
Definition DeclCXX.h:561
const CXXRecordDecl * getStandardLayoutBaseWithFields() const
If this is a standard-layout class or union, any and all data members will be declared in the same ty...
Definition DeclCXX.cpp:562
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:849
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition ExprCXX.h:1069
MSGuidDecl * getGuidDecl() const
Definition ExprCXX.h:1115
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
SourceLocation getBeginLoc() const
Definition Expr.h:3280
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
bool isCoroElideSafe() const
Definition Expr.h:3120
arg_range arguments()
Definition Expr.h:3198
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
path_iterator path_begin()
Definition Expr.h:3749
CastKind getCastKind() const
Definition Expr.h:3723
bool changesVolatileQualification() const
Return.
Definition Expr.h:3813
path_iterator path_end()
Definition Expr.h:3750
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
llvm::MaybeAlign getAsMaybeAlign() const
getAsMaybeAlign - Returns Quantity as a valid llvm::Align or std::nullopt, Beware llvm::MaybeAlign as...
Definition CharUnits.h:194
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
@ None
Trap Messages are omitted.
@ Detailed
Trap Message includes more context (e.g.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * getBasePointer() const
Definition Address.h:198
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition Address.h:261
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
bool isValid() const
Definition Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
An aggregate value slot.
Definition CGValue.h:551
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition CGValue.h:619
Address getAddress() const
Definition CGValue.h:691
void setExternallyDestructed(bool destructed=true)
Definition CGValue.h:660
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition CGValue.h:649
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:634
RValue asRValue() const
Definition CGValue.h:713
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition CGBuilder.h:315
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition CGBuilder.h:302
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition CGBuilder.h:341
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition CGBuilder.h:251
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition CGBuilder.h:229
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Definition CGBuilder.h:325
Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition CGBuilder.h:423
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition CGBuilder.h:199
virtual llvm::Function * getKernelStub(llvm::GlobalValue *Handle)=0
Get kernel stub by kernel handle.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
Abstract information about a function or function prototype.
Definition CGCall.h:41
const GlobalDecl getCalleeDecl() const
Definition CGCall.h:59
All available information about a concrete callee.
Definition CGCall.h:63
CGCalleeInfo getAbstractInfo() const
Definition CGCall.h:180
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CGCall.h:172
bool isPseudoDestructor() const
Definition CGCall.h:169
static CGCallee forBuiltin(unsigned builtinID, const FunctionDecl *builtinDecl)
Definition CGCall.h:123
unsigned getBuiltinID() const
Definition CGCall.h:164
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
bool isBuiltin() const
Definition CGCall.h:157
const FunctionDecl * getBuiltinDecl() const
Definition CGCall.h:160
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E)
Definition CGCall.h:131
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition CGDebugInfo.h:59
llvm::DIType * getOrCreateStandaloneType(QualType Ty, SourceLocation Loc)
Emit standalone debug info for a type.
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
llvm::DIType * getOrCreateRecordType(QualType Ty, SourceLocation L)
Emit record type's standalone debug info.
CGFunctionInfo - Class to encapsulate the information about a function definition.
RawAddress createBufferMatrixTempAddress(const LValue &LV, SourceLocation Loc, CodeGenFunction &CGF)
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
bool containsFieldDecl(const FieldDecl *FD) const
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr)
Definition CGCall.h:320
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
An object to manage conditionally-evaluated expressions.
llvm::BasicBlock * getStartingBlock() const
Returns a block which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
LValue getReferenceLValue(CodeGenFunction &CGF, const Expr *RefExpr) const
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
Definition CGExpr.cpp:5168
LValue EmitCoawaitLValue(const CoawaitExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2838
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2152
void EmitBoundsCheckImpl(const Expr *ArrayExpr, QualType ArrayBaseType, llvm::Value *IndexVal, QualType IndexType, llvm::Value *BoundsVal, QualType BoundsType, bool Accessed)
Definition CGExpr.cpp:1271
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
Definition CGExpr.cpp:3380
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:573
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
Definition CGExpr.cpp:6749
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
Definition CGExpr.cpp:6057
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3679
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
Definition CGExpr.cpp:1352
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:7182
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue EmitInitListLValue(const InitListExpr *E)
Definition CGExpr.cpp:5939
bool isUnderlyingBasePointerConstantNull(const Expr *E)
Check whether the underlying base pointer is a constant null.
Definition CGExpr.cpp:5492
void EmitARCInitWeak(Address addr, llvm::Value *value)
i8* @objc_initWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2663
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
Definition CGExpr.cpp:4939
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
Definition CGExpr.cpp:6781
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1193
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
const CastExpr * CurCast
If a cast expression is being visited, this holds the current cast's expression.
llvm::Type * ConvertType(QualType T)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
Definition CGExpr.cpp:6762
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
CGCapturedStmtInfo * CapturedStmtInfo
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2328
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
Definition CGClass.cpp:281
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
Definition CGExpr.cpp:3079
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Definition CGExpr.cpp:3846
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
Definition CGExpr.cpp:5912
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
Definition CGExpr.cpp:7134
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:3000
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3998
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
Definition CGExpr.cpp:6767
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:7160
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
Emit a CallExpr without considering whether it might be a subclass.
Definition CGExpr.cpp:6446
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
Definition CGExpr.cpp:729
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7283
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
Definition CGExpr.cpp:5607
const LangOptions & getLangOpts() const
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
Definition CGExpr.cpp:4367
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:692
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2545
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
Definition CGExpr.cpp:7111
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
Definition CGExpr.cpp:6815
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:388
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
Definition CGExpr.cpp:6039
void EmitCountedByBoundsChecking(const Expr *ArrayExpr, QualType ArrayType, Address ArrayInst, QualType IndexType, llvm::Value *IndexVal, bool Accessed, bool FlexibleArray)
EmitCountedByBoundsChecking - If the array being accessed has a "counted_by" attribute,...
Definition CGExpr.cpp:4889
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
Definition CGDecl.cpp:2300
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
Definition CGDecl.cpp:787
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:765
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
Definition CGExpr.cpp:3389
RValue EmitLoadOfGlobalRegLValue(LValue LV)
Load of global named registers are always calls to intrinsics.
Definition CGExpr.cpp:2714
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2979
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Upcast
Checking the operand of a cast to a base object.
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
Definition CGExpr.cpp:6597
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
Definition CGDecl.cpp:2273
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty, SourceLocation Loc)
Definition CGExpr.cpp:2082
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
Definition CGExpr.cpp:7288
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3888
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
Definition CGExpr.cpp:6295
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
Definition CGExpr.cpp:971
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ Default
! No language constraints on evaluation order.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
Definition CGExpr.cpp:6807
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
Definition CGExpr.cpp:2479
LValue EmitLValueForField(LValue Base, const FieldDecl *Field, bool IsInBounds=true)
Definition CGExpr.cpp:5713
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:176
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, bool IsInBounds, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Emit the address of a field using a member data pointer.
Definition CGClass.cpp:150
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
Definition CGExpr.cpp:6318
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
Definition CGExpr.cpp:734
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6522
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:251
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6396
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2497
LValue EmitMatrixSingleSubscriptExpr(const MatrixSingleSubscriptExpr *E)
Definition CGExpr.cpp:5159
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
Definition CGExpr.cpp:5230
Address GetAddrOfBlockDecl(const VarDecl *var)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
Definition CGExpr.cpp:4328
RawAddress CreateIRTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateIRTempWithoutCast - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:183
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
Definition CGExpr.cpp:7149
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
void EmitBoundsCheck(const Expr *ArrayExpr, const Expr *ArrayExprBase, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1255
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:232
LValue EmitPredefinedLValue(const PredefinedExpr *E)
Definition CGExpr.cpp:3851
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4146
LValue EmitDeclRefLValue(const DeclRefExpr *E)
Definition CGExpr.cpp:3555
LValue EmitStringLiteralLValue(const StringLiteral *E)
Definition CGExpr.cpp:3841
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6349
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:2037
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
Definition CGExpr.cpp:1640
LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Same as MakeAddrLValue above except that the pointer is known to be unsigned.
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1316
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5887
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2223
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition CGExpr.cpp:153
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6335
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
Definition CGExpr.cpp:2096
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5342
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E)
Definition CGExpr.cpp:6712
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen without...
Definition CGExpr.cpp:224
LValue EmitVAArgExprLValue(const VAArgExpr *E)
Definition CGExpr.cpp:6744
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside presereved access index region.
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
Definition CGExpr.cpp:292
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
LValue EmitStmtExprLValue(const StmtExpr *E)
Definition CGExpr.cpp:6847
llvm::Value * EmitARCLoadWeakRetained(Address addr)
i8* @objc_loadWeakRetained(i8** addr)
Definition CGObjC.cpp:2643
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates a alloca and inserts it into the entry block.
Definition CGExpr.cpp:107
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
Definition CGExpr.cpp:6823
Address EmitAddressOfPFPField(Address RecordPtr, const PFPField &Field)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2738
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4581
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2353
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1608
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
void EmitAllocToken(llvm::CallBase *CB, QualType AllocType)
Emit and set additional metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1330
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
LValue EmitCastLValue(const CastExpr *E)
EmitCastLValue - Casts are never lvalues unless that cast is to a reference type.
Definition CGExpr.cpp:6107
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
Definition CGExpr.cpp:515
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
Definition CGExpr.cpp:3399
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3960
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition CGExpr.cpp:302
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:273
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
Definition CGExpr.cpp:5394
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
Definition CGExpr.cpp:1634
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
std::pair< LValue, LValue > EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty)
Definition CGExpr.cpp:6301
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Definition CGExpr.cpp:6795
llvm::Type * ConvertTypeForMem(QualType T)
LValue EmitCallExprLValue(const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6729
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
Definition CGExpr.cpp:2612
llvm::Value * EmitARCLoadWeak(Address addr)
i8* @objc_loadWeak(i8** addr) Essentially objc_autorelease(objc_loadWeakRetained(addr)).
Definition CGObjC.cpp:2636
LValue EmitLValueForLambdaField(const FieldDecl *Field)
Definition CGExpr.cpp:5601
void markStmtMaybeUsed(const Stmt *S)
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
Definition CGExpr.cpp:6801
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7292
LValue EmitCoyieldLValue(const CoyieldExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond if false.
Definition CGExpr.cpp:4280
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
Emits all the code to cause the given temporary to be cleaned up.
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
Definition CGExpr.cpp:3774
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1591
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1672
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:189
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
Definition CGExpr.cpp:3347
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
Definition CGExpr.cpp:6368
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
Definition CGObjC.cpp:2160
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
Definition CGExpr.cpp:6773
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:742
Address EmitExtVectorElementLValue(LValue V)
Generates lvalue for partial ext_vector access.
Definition CGExpr.cpp:2696
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Definition CGExpr.cpp:332
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
RValue EmitLoadOfExtVectorElementLValue(LValue V)
Definition CGExpr.cpp:2649
static bool hasAggregateEvaluationKind(QualType T)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
Definition CGExpr.cpp:1649
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
Definition CGCall.cpp:4779
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:5151
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4566
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4488
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2257
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1247
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4476
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
Definition CGExpr.cpp:6758
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
generateDestroyHelper - Generates a helper function which, when invoked, destroys the given object.
LValue EmitMemberExpr(const MemberExpr *E)
Definition CGExpr.cpp:5499
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1934
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1707
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Store of global named registers are always calls to intrinsics.
Definition CGExpr.cpp:3189
bool isOpaqueValueEmitted(const OpaqueValueExpr *E)
isOpaqueValueEmitted - Return true if the opaque value expression has already been emitted.
Definition CGExpr.cpp:6362
std::pair< llvm::Value *, CGPointerAuthInfo > EmitOrigPointerRValue(const Expr *E)
Retrieve a pointer rvalue and its ptrauth info.
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
LValue EmitMatrixElementExpr(const MatrixElementExpr *E)
Definition CGExpr.cpp:2312
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
Definition CGExpr.cpp:714
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
Definition CGExpr.cpp:1601
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:640
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1387
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * performAddrSpaceCast(llvm::Constant *Src, llvm::Type *DestTy)
llvm::Constant * getRawFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return a function pointer for a reference to the given function.
Definition CGExpr.cpp:3446
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition CGCXX.cpp:252
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
llvm::Constant * getFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return the ABI-correct function pointer value for a reference to the given function.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type)
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
A specialization of Address that requires the address to be an LLVM Constant.
Definition Address.h:296
llvm::Constant * getPointer() const
Definition Address.h:308
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
llvm::Constant * tryEmitConstantExpr(const ConstantExpr *CE)
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
AlignmentSource getAlignmentSource() const
Definition CGValue.h:172
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getMatrixRowIdx() const
Definition CGValue.h:412
static LValue MakeMatrixRow(Address Addr, llvm::Value *RowIdx, QualType MatrixTy, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:510
bool isBitField() const
Definition CGValue.h:288
bool isMatrixElt() const
Definition CGValue.h:291
Expr * getBaseIvarExp() const
Definition CGValue.h:344
llvm::Constant * getExtVectorElts() const
Definition CGValue.h:431
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition CGValue.h:500
llvm::Constant * getMatrixRowElts() const
Definition CGValue.h:417
bool isObjCStrong() const
Definition CGValue.h:336
bool isMatrixRowSwizzle() const
Definition CGValue.h:293
bool isGlobalObjCRef() const
Definition CGValue.h:318
bool isVectorElt() const
Definition CGValue.h:287
bool isSimple() const
Definition CGValue.h:286
bool isVolatileQualified() const
Definition CGValue.h:297
RValue asAggregateRValue() const
Definition CGValue.h:545
llvm::Value * getPointer(CodeGenFunction &CGF) const
llvm::Value * getMatrixIdx() const
Definition CGValue.h:407
llvm::Value * getGlobalReg() const
Definition CGValue.h:452
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:454
bool isVolatile() const
Definition CGValue.h:340
const Qualifiers & getQuals() const
Definition CGValue.h:350
bool isGlobalReg() const
Definition CGValue.h:290
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:474
bool isObjCWeak() const
Definition CGValue.h:333
Address getAddress() const
Definition CGValue.h:373
unsigned getVRQualifiers() const
Definition CGValue.h:299
bool isMatrixRow() const
Definition CGValue.h:292
LValue setKnownNonNull()
Definition CGValue.h:362
bool isNonGC() const
Definition CGValue.h:315
bool isExtVectorElt() const
Definition CGValue.h:289
llvm::Value * getVectorIdx() const
Definition CGValue.h:394
void setNontemporal(bool Value)
Definition CGValue.h:331
LValueBaseInfo getBaseInfo() const
Definition CGValue.h:358
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition CGValue.h:327
QualType getType() const
Definition CGValue.h:303
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:446
bool isThreadLocalRef() const
Definition CGValue.h:321
KnownNonNull_t isKnownNonNull() const
Definition CGValue.h:361
TBAAAccessInfo getTBAAInfo() const
Definition CGValue.h:347
void setNonGC(bool Value)
Definition CGValue.h:316
static LValue MakeMatrixRowSwizzle(Address MatAddr, llvm::Value *RowIdx, llvm::Constant *Cols, QualType MatrixTy, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:521
Address getVectorAddress() const
Definition CGValue.h:382
bool isNontemporal() const
Definition CGValue.h:330
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition CGValue.h:490
bool isObjCIvar() const
Definition CGValue.h:309
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:464
void setAddress(Address address)
Definition CGValue.h:375
Address getExtVectorAddress() const
Definition CGValue.h:423
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:535
Address getMatrixAddress() const
Definition CGValue.h:399
Address getBitFieldAddress() const
Definition CGValue.h:437
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:109
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
An abstract representation of an aligned address.
Definition Address.h:42
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition Address.h:93
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:77
llvm::Value * getPointer() const
Definition Address.h:66
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition Address.h:83
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3283
QualType getElementType() const
Definition TypeBase.h:3293
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
bool isFileScope() const
Definition Expr.h:3640
const Expr * getInitializer() const
Definition Expr.h:3636
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition Expr.h:1085
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4395
unsigned getNumColumns() const
Returns the number of columns in the matrix.
Definition TypeBase.h:4414
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition TypeBase.h:4411
DeclContext * getLexicalParent()
getLexicalParent - Returns the containing lexical DeclContext.
Definition DeclBase.h:2125
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1477
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:488
ValueDecl * getDecl()
Definition Expr.h:1341
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1471
SourceLocation getLocation() const
Definition Expr.h:1349
T * getAttr() const
Definition DeclBase.h:573
SourceLocation getLocation() const
Definition DeclBase.h:439
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition DeclBase.cpp:575
DeclContext * getDeclContext()
Definition DeclBase.h:448
bool hasAttr() const
Definition DeclBase.h:577
const Expr * getBase() const
Definition Expr.h:6580
ExplicitCastExpr - An explicit cast written in the source code.
Definition Expr.h:3931
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:84
bool isGLValue() const
Definition Expr.h:287
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3117
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:447
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3090
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known ...
bool isPRValue() const
Definition Expr.h:285
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition Expr.h:284
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1546
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3670
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3070
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:479
QualType getType() const
Definition Expr.h:144
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/ write barrier.
Definition Expr.cpp:3001
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6609
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4418
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4531
ExtVectorType - Extended vector type.
Definition TypeBase.h:4275
Represents a member of a struct/union/class.
Definition Decl.h:3160
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3263
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4856
const Expr * getSubExpr() const
Definition Expr.h:1065
Represents a function declaration or definition.
Definition Decl.h:2000
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3763
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5315
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4511
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition Expr.h:7396
const OpaqueValueExpr * getCastedTemporary() const
Definition Expr.h:7447
const OpaqueValueExpr * getOpaqueArgLValue() const
Definition Expr.h:7428
bool isInOut() const
returns true if the parameter is inout and false if the parameter is out.
Definition Expr.h:7455
const Expr * getWritebackCast() const
Definition Expr.h:7442
const Expr * getArgLValue() const
Return the l-value expression that was written as the argument in source.
Definition Expr.h:7437
Describes an C or C++ initializer list.
Definition Expr.h:5302
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2462
const Expr * getInit(unsigned Init) const
Definition Expr.h:5356
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4921
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4946
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4938
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4971
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4563
MatrixSingleSubscriptExpr - Matrix single subscript expression for the MatrixType extension when you ...
Definition Expr.h:2798
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition Expr.h:2868
bool isIncomplete() const
Definition Expr.h:2888
QualType getElementType() const
Returns type of the elements being stored in the matrix.
Definition TypeBase.h:4359
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3450
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3591
Expr * getBase() const
Definition Expr.h:3444
bool isArrow() const
Definition Expr.h:3551
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3562
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3661
bool isObjCBOOLType(QualType T) const
Returns true if.
Definition NSAPI.cpp:481
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
ObjCEncodeExpr, used for @encode in Objective-C.
Definition ExprObjC.h:407
Represents an ObjC class declaration.
Definition DeclObjC.h:1154
ObjCIvarDecl - Represents an ObjC instance variable.
Definition DeclObjC.h:1952
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition ExprObjC.h:546
ObjCIvarDecl * getDecl()
Definition ExprObjC.h:576
bool isArrow() const
Definition ExprObjC.h:584
const Expr * getBase() const
Definition ExprObjC.h:580
An expression that sends a message to the given Objective-C object or class.
Definition ExprObjC.h:937
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1361
QualType getReturnType() const
Definition DeclObjC.h:329
ObjCSelectorExpr used for @selector in Objective-C.
Definition ExprObjC.h:452
Selector getSelector() const
Definition ExprObjC.h:466
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
bool isUnique() const
Definition Expr.h:1239
const Expr * getSubExpr() const
Definition Expr.h:2202
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3336
QualType getPointeeType() const
Definition TypeBase.h:3346
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2008
StringRef getIdentKindName() const
Definition Expr.h:2065
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2043
StringLiteral * getFunctionName()
Definition Expr.h:2052
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition Expr.h:6803
semantics_iterator semantics_end()
Definition Expr.h:6868
semantics_iterator semantics_begin()
Definition Expr.h:6864
const Expr *const * const_semantics_iterator
Definition Expr.h:6863
Expr * getResultExpr()
Return the result-bearing expression, or null if there is none.
Definition Expr.h:6851
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8472
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1459
QualType withoutLocalFastQualifiers() const
Definition TypeBase.h:1220
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8514
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8428
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1444
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8573
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition TypeBase.h:8482
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1185
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1551
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition TypeBase.h:1036
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
bool hasConst() const
Definition TypeBase.h:457
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void removeObjCGCAttr()
Definition TypeBase.h:523
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
void removePointerAuth()
Definition TypeBase.h:610
void setAddressSpace(LangAS space)
Definition TypeBase.h:591
bool hasVolatile() const
Definition TypeBase.h:467
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:603
ObjCLifetime getObjCLifetime() const
Definition TypeBase.h:545
Represents a struct/union/class.
Definition Decl.h:4327
field_range fields() const
Definition Decl.h:4530
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition Decl.h:4511
bool isAnonymousStructOrUnion() const
Whether this is an anonymous struct or union.
Definition Decl.h:4379
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition Expr.h:4598
StmtClass getStmtClass() const
Definition Stmt.h:1485
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
bool isUnion() const
Definition Decl.h:3928
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
virtual StringRef getABI() const
Get the ABI currently in use.
The base class of the type hierarchy.
Definition TypeBase.h:1839
bool isBlockPointerType() const
Definition TypeBase.h:8645
bool isVoidType() const
Definition TypeBase.h:8991
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2253
bool isPackedVectorBoolType(const ASTContext &ctx) const
Definition Type.cpp:419
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition Type.cpp:1964
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9294
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8728
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isArrayType() const
Definition TypeBase.h:8724
bool isFunctionPointerType() const
Definition TypeBase.h:8692
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2374
bool isConstantMatrixType() const
Definition TypeBase.h:8792
bool isPointerType() const
Definition TypeBase.h:8625
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:9035
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9285
bool isReferenceType() const
Definition TypeBase.h:8649
bool isEnumeralType() const
Definition TypeBase.h:8756
bool isVariableArrayType() const
Definition TypeBase.h:8736
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isExtVectorBoolType() const
Definition TypeBase.h:8772
bool isBitIntType() const
Definition TypeBase.h:8900
bool isConstantMatrixBoolType() const
Definition TypeBase.h:8778
bool isAnyComplexType() const
Definition TypeBase.h:8760
bool hasPointeeToCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8677
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9171
bool isAtomicType() const
Definition TypeBase.h:8817
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2808
bool isObjectType() const
Determine whether this type is an object type.
Definition TypeBase.h:2516
bool isHLSLResourceRecord() const
Definition Type.cpp:5443
EnumDecl * getAsEnumDecl() const
Retrieves the EnumDecl this type refers to.
Definition Type.h:53
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2479
bool isFunctionType() const
Definition TypeBase.h:8621
bool isObjCObjectPointerType() const
Definition TypeBase.h:8804
bool isVectorType() const
Definition TypeBase.h:8764
bool isAnyPointerType() const
Definition TypeBase.h:8633
bool isSubscriptableVectorType() const
Definition TypeBase.h:8784
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9218
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition Type.cpp:654
bool isRecordType() const
Definition TypeBase.h:8752
bool isHLSLResourceRecordArray() const
Definition Type.cpp:5447
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2396
bool isCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8671
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
SourceLocation getExprLoc() const
Definition Expr.h:2371
Expr * getSubExpr() const
Definition Expr.h:2288
Opcode getOpcode() const
Definition Expr.h:2283
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a variable declaration or definition.
Definition Decl.h:926
TLSKind getTLSKind() const
Definition Decl.cpp:2180
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2378
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1184
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:952
@ TLS_None
Not a TLS variable.
Definition Decl.h:946
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3974
Represents a GCC generic vector type.
Definition TypeBase.h:4183
unsigned getNumElements() const
Definition TypeBase.h:4198
#define INT_MIN
Definition limits.h:55
Definition SPIR.cpp:35
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition CGValue.h:142
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD)
isEmptyFieldForLayout - Return true iff the field is "empty", that is, either a zero-width bit-field ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
@ ARCImpreciseLifetime
Definition CGValue.h:137
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition CGValue.h:160
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
std::optional< llvm::AllocTokenMetadata > getAllocTokenMetadata(QualType T, const ASTContext &Ctx)
Get the information required for construction of an allocation token ID.
QualType inferPossibleType(const CallExpr *E, const ASTContext &Ctx, const CastExpr *CastE)
Infer the possible allocated type from an allocation call expression.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition Specifiers.h:154
@ SC_Register
Definition Specifiers.h:257
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition Specifiers.h:339
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:342
@ SD_Static
Static storage duration.
Definition Specifiers.h:343
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:340
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:341
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:344
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
LangAS
Defines the address space values used by the address space qualifier of QualType.
llvm::cl::opt< bool > ClSanitizeGuardChecks
SmallVector< CXXBaseSpecifier *, 4 > CXXCastPath
A simple array of base specifiers.
Definition ASTContext.h:150
U cast(CodeGen::Address addr)
Definition Address.h:327
LangAS getLangASFromTargetAS(unsigned TargetAS)
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
Definition TypeBase.h:5919
bool isLambdaMethod(const DeclContext *DC)
Definition ASTLambda.h:39
@ Other
Other implicit parameter.
Definition Decl.h:1746
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:180
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
uint64_t Offset
Offset - The byte offset of the final access within the base one.
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
uint64_t Size
Size - The size of access, in bytes.
llvm::MDNode * BaseType
BaseType - The base/leading access type.
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:615
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition Expr.h:68