clang 23.0.0git
CGExpr.cpp
Go to the documentation of this file.
1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGObjCRuntime.h"
21#include "CGOpenMPRuntime.h"
22#include "CGRecordLayout.h"
23#include "CodeGenFunction.h"
24#include "CodeGenModule.h"
25#include "CodeGenPGO.h"
26#include "ConstantEmitter.h"
27#include "TargetInfo.h"
29#include "clang/AST/ASTLambda.h"
30#include "clang/AST/Attr.h"
31#include "clang/AST/DeclObjC.h"
32#include "clang/AST/Expr.h"
34#include "clang/AST/NSAPI.h"
39#include "clang/Basic/Module.h"
41#include "llvm/ADT/STLExtras.h"
42#include "llvm/ADT/ScopeExit.h"
43#include "llvm/ADT/StringExtras.h"
44#include "llvm/IR/Constants.h"
45#include "llvm/IR/DataLayout.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/Support/ConvertUTF.h"
51#include "llvm/Support/Endian.h"
52#include "llvm/Support/MathExtras.h"
53#include "llvm/Support/Path.h"
54#include "llvm/Support/xxhash.h"
55#include "llvm/Transforms/Utils/SanitizerStats.h"
56
57#include <numeric>
58#include <optional>
59#include <string>
60
61using namespace clang;
62using namespace CodeGen;
63
64namespace clang {
65// TODO: consider deprecating ClSanitizeGuardChecks; functionality is subsumed
66// by -fsanitize-skip-hot-cutoff
67llvm::cl::opt<bool> ClSanitizeGuardChecks(
68 "ubsan-guard-checks", llvm::cl::Optional,
69 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
70
71} // namespace clang
72
73//===--------------------------------------------------------------------===//
74// Defines for metadata
75//===--------------------------------------------------------------------===//
76
77// Those values are crucial to be the SAME as in ubsan runtime library.
// NOTE(review): the enum-declaration line (original line 78) was elided by
// the extraction this chunk came from; only the enumerators survive. Per the
// comment above, these values mirror the UBSan runtime's type-kind encoding
// and must not be renumbered.
79 /// An integer type.
80 TK_Integer = 0x0000,
81 /// A floating-point type.
82 TK_Float = 0x0001,
83 /// An _BitInt(N) type.
84 TK_BitInt = 0x0002,
85 /// Any other type. The value representation is unspecified.
86 TK_Unknown = 0xffff
87};
88
89//===--------------------------------------------------------------------===//
90// Miscellaneous Helper Methods
91//===--------------------------------------------------------------------===//
92
/// Maps a SanitizerHandler ID to the message string (Msg) associated with
/// that check, for use in trap diagnostics.
// NOTE(review): original line 98 — the line between the #define and #undef
// that expands SANITIZER_CHECK once per handler — was elided by the
// extraction; without it the switch has no cases.
93static llvm::StringRef GetUBSanTrapForHandler(SanitizerHandler ID) {
94 switch (ID) {
95#define SANITIZER_CHECK(Enum, Name, Version, Msg) \
96 case SanitizerHandler::Enum: \
97 return Msg;
99#undef SANITIZER_CHECK
100 }
101 llvm_unreachable("unhandled switch case");
102}
103
104/// CreateTempAlloca - This creates a alloca and inserts it into the entry
105/// block.
// NOTE(review): original lines 106-107 (return type, function name, and the
// leading parameters introducing `Ty` and `Align`) were elided by the
// extraction; the body below references them.
108 const Twine &Name,
109 llvm::Value *ArraySize) {
// Logical-pointer mode takes a separate path via a "structured" alloca.
// NOTE(review): EmitLogicalPointer / CreateStructuredAlloca are not upstream
// clang names — presumably a local extension; confirm their semantics.
110 if (getLangOpts().EmitLogicalPointer) {
111 auto Alloca = Builder.CreateStructuredAlloca(Ty, Name);
112 return RawAddress(Alloca, Ty, Align, KnownNonNull);
113 }
114
// Default path: emit a temp alloca and force the requested alignment on it.
115 auto *Alloca = CreateTempAlloca(Ty, Name, ArraySize);
116 Alloca->setAlignment(Align.getAsAlign());
117 return RawAddress(Alloca, Ty, Align, KnownNonNull);
118}
119
/// Casts a stack allocation from the alloca address space to the address
/// space of \p DestLangAS when the two differ; otherwise returns the address
/// rebuilt unchanged.
// NOTE(review): original line 142 — the trailing argument(s) of the final
// RawAddress(...) constructor call — was elided by the extraction.
120RawAddress CodeGenFunction::MaybeCastStackAddressSpace(RawAddress Alloca,
121 LangAS DestLangAS,
122 llvm::Value *ArraySize) {
123
124 llvm::Value *V = Alloca.getPointer();
125 // Alloca always returns a pointer in alloca address space, which may
126 // be different from the type defined by the language. For example,
127 // in C++ the auto variables are in the default address space. Therefore
128 // cast alloca to the default address space when necessary.
129
130 unsigned DestAddrSpace = getContext().getTargetAddressSpace(DestLangAS);
131 if (DestAddrSpace != Alloca.getAddressSpace()) {
132 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
133 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
134 // otherwise alloca is inserted at the current insertion point of the
135 // builder.
136 if (!ArraySize)
137 Builder.SetInsertPoint(getPostAllocaInsertPoint());
138 V = performAddrSpaceCast(V, Builder.getPtrTy(DestAddrSpace));
139 }
140
141 return RawAddress(V, Alloca.getElementType(), Alloca.getAlignment(),
143}
144
// NOTE(review): original line 145 (the signature start introducing `Ty` and
// `DestLangAS`) was elided by the extraction. This is the CreateTempAlloca
// overload that also address-space-casts the result toward DestLangAS and
// optionally reports the raw (uncast) alloca through AllocaAddr.
146 CharUnits Align, const Twine &Name,
147 llvm::Value *ArraySize,
148 RawAddress *AllocaAddr) {
149 RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
150 if (AllocaAddr)
151 *AllocaAddr = Alloca;
152 return MaybeCastStackAddressSpace(Alloca, DestLangAS, ArraySize);
153}
154
155/// CreateTempAlloca - This creates an alloca and inserts it into the entry
156/// block if \p ArraySize is nullptr, otherwise inserts it at the current
157/// insertion point of the builder.
158llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
159 const Twine &Name,
160 llvm::Value *ArraySize) {
161 llvm::AllocaInst *Alloca;
162 if (ArraySize)
163 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
164 else
165 Alloca =
166 new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
167 ArraySize, Name, AllocaInsertPt->getIterator());
168 if (SanOpts.Mask & SanitizerKind::Address) {
169 Alloca->addAnnotationMetadata({"alloca_name_altered", Name.str()});
170 }
171 if (Allocas) {
172 Allocas->Add(Alloca);
173 }
174 return Alloca;
175}
176
177/// CreateDefaultAlignTempAlloca - This creates an alloca with the
178/// default alignment of the corresponding LLVM type, which is *not*
179/// guaranteed to be related in any way to the expected alignment of
180/// an AST type that might have been lowered to Ty.
// NOTE(review): original line 181 (the function signature) was elided by the
// extraction. Uses the data layout's *preferred* alignment for Ty.
182 const Twine &Name) {
183 CharUnits Align =
184 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
185 return CreateTempAlloca(Ty, Align, Name);
186}
187
// NOTE(review): original lines 188 and 190 were elided by the extraction;
// only the tail of this CreateMemTemp* overload survives. It forwards to
// CreateTempAllocaWithoutCast with the converted type and no array size.
189 const Twine &Name) {
191 return CreateTempAllocaWithoutCast(ConvertType(Ty), Align, Name, nullptr);
192}
193
// NOTE(review): original line 194 (the signature start) was elided by the
// extraction. Convenience overload: uses the AST type's ABI alignment.
195 RawAddress *Alloca) {
196 // FIXME: Should we prefer the preferred type alignment here?
197 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
198}
199
// NOTE(review): original lines 200, 203, and 218 were elided by the
// extraction (the signature start, the line creating `Result`, and one
// trailing argument of the Address(...) rebuild).
201 const Twine &Name,
202 RawAddress *Alloca) {
204 /*ArraySize=*/nullptr, Alloca);
205
// For constant matrix types the slot is allocated as an array; rebuild the
// address with a flat vector type of the same total element count (HLSL
// stores an array of vectors, hence the extra flattening step).
206 if (Ty->isConstantMatrixType()) {
207 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
208 auto *ArrayElementTy = ArrayTy->getElementType();
209 auto ArrayElements = ArrayTy->getNumElements();
210 if (getContext().getLangOpts().HLSL) {
211 auto *VectorTy = cast<llvm::FixedVectorType>(ArrayElementTy);
212 ArrayElementTy = VectorTy->getElementType();
213 ArrayElements *= VectorTy->getNumElements();
214 }
215 auto *VectorTy = llvm::FixedVectorType::get(ArrayElementTy, ArrayElements);
216
217 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
219 }
220 return Result;
221}
222
// NOTE(review): original line 223 (the signature start) was elided by the
// extraction. Creates a memory temporary without the address-space cast,
// using the memory representation of Ty.
224 CharUnits Align,
225 const Twine &Name) {
226 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
227}
228
// NOTE(review): original line 229 (the signature start) was elided by the
// extraction. Convenience overload: uses the AST type's ABI alignment.
230 const Twine &Name) {
231 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
232 Name);
233}
234
235/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
236/// expression and compare the result against zero, returning an Int1Ty value.
// NOTE(review): original lines 237 (signature) and 250 (the start of the
// complex-to-bool conversion call whose tail is `Loc);` below) were elided
// by the extraction.
238 PGO->setCurrentStmt(E);
// Member pointers have no simple scalar zero test; delegate to the C++ ABI.
239 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
240 llvm::Value *MemPtr = EmitScalarExpr(E);
241 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
242 }
243
244 QualType BoolTy = getContext().BoolTy;
245 SourceLocation Loc = E->getExprLoc();
246 CGFPOptionsRAII FPOptsRAII(*this, E);
247 if (!E->getType()->isAnyComplexType())
248 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
249
251 Loc);
252}
253
254/// EmitIgnoredExpr - Emit code to compute the specified expression,
255/// ignoring the result.
// NOTE(review): original lines 256 (signature) and 265 (the argument line of
// the dyn_cast below, presumably `E->IgnoreParens())) {` — confirm) were
// elided by the extraction.
257 if (E->isPRValue())
258 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
259
260 // if this is a bitfield-resulting conditional operator, we can special case
261 // emit this. The normal 'EmitLValue' version of this is particularly
262 // difficult to codegen for, since creating a single "LValue" for two
263 // different sized arguments here is not particularly doable.
264 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
266 if (CondOp->getObjectKind() == OK_BitField)
267 return EmitIgnoredConditionalOperator(CondOp);
268 }
269
270 // Just emit it as an l-value and drop the result.
271 EmitLValue(E);
272}
273
274/// EmitAnyExpr - Emit code to compute the specified expression which
275/// can have any type. The result is returned as an RValue struct.
276/// If this is an aggregate expression, AggSlot indicates where the
277/// result should be returned.
// NOTE(review): original line 278 (the signature start introducing `E`) was
// elided by the extraction.
279 AggValueSlot aggSlot,
280 bool ignoreResult) {
// Dispatch on the evaluation kind of the expression's type: scalars and
// complex values are returned directly; aggregates are written into a slot
// (a fresh temporary if the caller provided none and the result is wanted).
281 switch (getEvaluationKind(E->getType())) {
282 case TEK_Scalar:
283 return RValue::get(EmitScalarExpr(E, ignoreResult));
284 case TEK_Complex:
285 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
286 case TEK_Aggregate:
287 if (!ignoreResult && aggSlot.isIgnored())
288 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
289 EmitAggExpr(E, aggSlot);
290 return aggSlot.asRValue();
291 }
292 llvm_unreachable("bad evaluation kind");
293}
294
295/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
296/// always be accessible even if no aggregate location is provided.
// NOTE(review): original lines 297-298 and 300 (the signature, the AggSlot
// declaration, and the condition guarding the temp creation) were elided by
// the extraction.
299
301 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
302 return EmitAnyExpr(E, AggSlot);
303}
304
305/// EmitAnyExprToMem - Evaluate an expression into a given memory
306/// location.
// NOTE(review): original lines 307 (signature tail), 314 (the complex-emit
// call start), 320-323 (the AggValueSlot::forAddr flag arguments), and 330
// (the scalar store call) were elided by the extraction.
308 Address Location,
309 Qualifiers Quals,
310 bool IsInit) {
311 // FIXME: This function should take an LValue as an argument.
312 switch (getEvaluationKind(E->getType())) {
313 case TEK_Complex:
315 /*isInit*/ false);
316 return;
317
318 case TEK_Aggregate: {
319 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
324 return;
325 }
326
327 case TEK_Scalar: {
328 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
329 LValue LV = MakeAddrLValue(Location, E->getType());
331 return;
332 }
333 }
334 llvm_unreachable("bad evaluation kind");
335}
336
// NOTE(review): original lines 337 (the function name / return type),
// 345-347 (the aggregate-emit call and its slot arguments), and 354 (the
// non-simple scalar store path) were elided by the extraction. Initializes
// the given lvalue from expression E, dispatching on evaluation kind.
338 const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
339 QualType Type = LV.getType();
340 switch (getEvaluationKind(Type)) {
341 case TEK_Complex:
342 EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
343 return;
344 case TEK_Aggregate:
348 AggValueSlot::MayOverlap, IsZeroed));
349 return;
350 case TEK_Scalar:
351 if (LV.isSimple())
352 EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
353 else
355 return;
356 }
357 llvm_unreachable("bad evaluation kind");
}
359
360static void
362 const Expr *E, Address ReferenceTemporary) {
363 // Objective-C++ ARC:
364 // If we are binding a reference to a temporary that has ownership, we
365 // need to perform retain/release operations on the temporary.
366 //
367 // FIXME: This should be looking at E, not M.
368 if (auto Lifetime = M->getType().getObjCLifetime()) {
369 switch (Lifetime) {
372 // Carry on to normal cleanup handling.
373 break;
374
376 // Nothing to do; cleaned up by an autorelease pool.
377 return;
378
381 switch (StorageDuration Duration = M->getStorageDuration()) {
382 case SD_Static:
383 // Note: we intentionally do not register a cleanup to release
384 // the object on program termination.
385 return;
386
387 case SD_Thread:
388 // FIXME: We should probably register a cleanup in this case.
389 return;
390
391 case SD_Automatic:
395 if (Lifetime == Qualifiers::OCL_Strong) {
396 const ValueDecl *VD = M->getExtendingDecl();
397 bool Precise = isa_and_nonnull<VarDecl>(VD) &&
398 VD->hasAttr<ObjCPreciseLifetimeAttr>();
402 } else {
403 // __weak objects always get EH cleanups; otherwise, exceptions
404 // could cause really nasty crashes instead of mere leaks.
407 }
408 if (Duration == SD_FullExpression)
409 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
410 M->getType(), *Destroy,
412 else
413 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
414 M->getType(),
415 *Destroy, CleanupKind & EHCleanup);
416 return;
417
418 case SD_Dynamic:
419 llvm_unreachable("temporary cannot have dynamic storage duration");
420 }
421 llvm_unreachable("unknown storage duration");
422 }
423 }
424
426 if (DK != QualType::DK_none) {
427 switch (M->getStorageDuration()) {
428 case SD_Static:
429 case SD_Thread: {
430 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
431 if (const auto *ClassDecl =
433 ClassDecl && !ClassDecl->hasTrivialDestructor())
434 // Get the destructor for the reference temporary.
435 ReferenceTemporaryDtor = ClassDecl->getDestructor();
436
437 if (!ReferenceTemporaryDtor)
438 return;
439
440 llvm::FunctionCallee CleanupFn;
441 llvm::Constant *CleanupArg;
442 if (E->getType()->isArrayType()) {
444 ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject,
445 CGF.getLangOpts().Exceptions,
446 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
447 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
448 } else {
449 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
450 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
451 CleanupArg =
452 cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
453 }
455 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
456 } break;
458 CGF.pushDestroy(DK, ReferenceTemporary, E->getType());
459 break;
460 case SD_Automatic:
461 CGF.pushLifetimeExtendedDestroy(DK, ReferenceTemporary, E->getType());
462 break;
463 case SD_Dynamic:
464 llvm_unreachable("temporary cannot have dynamic storage duration");
465 }
466 }
467}
468
// NOTE(review): original lines 469-470 (the static signature taking `CGF`
// and the MaterializeTemporaryExpr `M`), 474 (presumably
// `case SD_FullExpression:` — confirm), 490, 495, and 498 (interior
// arguments of the GlobalVariable construction and address-space cast) were
// elided by the extraction. Creates storage for a reference-bound temporary,
// keyed on its storage duration.
471 const Expr *Inner,
472 RawAddress *Alloca = nullptr) {
473 switch (M->getStorageDuration()) {
475 case SD_Automatic: {
476 // If we have a constant temporary array or record try to promote it into a
477 // constant global under the same rules a normal constant would've been
478 // promoted. This is easier on the optimizer and generally emits fewer
479 // instructions.
480 QualType Ty = Inner->getType();
481 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
482 (Ty->isArrayType() || Ty->isRecordType()) &&
483 Ty.isConstantStorage(CGF.getContext(), true, false))
484 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
485 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
486 auto *GV = new llvm::GlobalVariable(
487 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
488 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
489 llvm::GlobalValue::NotThreadLocal,
491 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
492 GV->setAlignment(alignment.getAsAlign());
493 llvm::Constant *C = GV;
494 if (AS != LangAS::Default)
496 GV, llvm::PointerType::get(
497 CGF.getLLVMContext(),
499 // FIXME: Should we put the new global into a COMDAT?
500 return RawAddress(C, GV->getValueType(), alignment);
501 }
// Otherwise fall back to an ordinary stack temporary.
502 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
503 }
504 case SD_Thread:
505 case SD_Static:
506 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
507
508 case SD_Dynamic:
509 llvm_unreachable("temporary can't have dynamic storage duration");
510 }
511 llvm_unreachable("unknown storage duration");
512}
513
514/// Helper method to check if the underlying ABI is AAPCS
515static bool isAAPCS(const TargetInfo &TargetInfo) {
516 return TargetInfo.getABI().starts_with("aapcs");
517}
518
521 const Expr *E = M->getSubExpr();
522
523 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
524 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
525 "Reference should never be pseudo-strong!");
526
527 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
528 // as that will cause the lifetime adjustment to be lost for ARC
529 auto ownership = M->getType().getObjCLifetime();
530 if (ownership != Qualifiers::OCL_None &&
531 ownership != Qualifiers::OCL_ExplicitNone) {
533 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
534 llvm::Type *Ty = ConvertTypeForMem(E->getType());
535 Object = Object.withElementType(Ty);
536
537 // createReferenceTemporary will promote the temporary to a global with a
538 // constant initializer if it can. It can only do this to a value of
539 // ARC-manageable type if the value is global and therefore "immune" to
540 // ref-counting operations. Therefore we have no need to emit either a
541 // dynamic initialization or a cleanup and we can just return the address
542 // of the temporary.
543 if (Var->hasInitializer())
545
546 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
547 }
548 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
550
551 switch (getEvaluationKind(E->getType())) {
552 default: llvm_unreachable("expected scalar or aggregate expression");
553 case TEK_Scalar:
554 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
555 break;
556 case TEK_Aggregate: {
558 E->getType().getQualifiers(),
563 break;
564 }
565 }
566
567 pushTemporaryCleanup(*this, M, E, Object);
568 return RefTempDst;
569 }
570
573 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
574
575 for (const auto &Ignored : CommaLHSs)
576 EmitIgnoredExpr(Ignored);
577
578 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
579 if (opaque->getType()->isRecordType()) {
580 assert(Adjustments.empty());
581 return EmitOpaqueValueLValue(opaque);
582 }
583 }
584
585 // Create and initialize the reference temporary.
586 RawAddress Alloca = Address::invalid();
587 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
588 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
589 Object.getPointer()->stripPointerCasts())) {
590 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
591 Object = Object.withElementType(TemporaryType);
592 // If the temporary is a global and has a constant initializer or is a
593 // constant temporary that we promoted to a global, we may have already
594 // initialized it.
595 if (!Var->hasInitializer()) {
596 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
598 if (RefType.getPointerAuth()) {
599 // Use the qualifier of the reference temporary to sign the pointer.
600 LValue LV = MakeRawAddrLValue(Object.getPointer(), RefType,
601 Object.getAlignment());
602 EmitScalarInit(E, M->getExtendingDecl(), LV, false);
603 } else {
604 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
605 }
606 }
607 } else {
608 switch (M->getStorageDuration()) {
609 case SD_Automatic:
610 if (EmitLifetimeStart(Alloca.getPointer())) {
612 Alloca);
613 }
614 break;
615
616 case SD_FullExpression: {
617 if (!ShouldEmitLifetimeMarkers)
618 break;
619
620 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
621 // marker. Instead, start the lifetime of a conditional temporary earlier
622 // so that it's unconditional. Don't do this with sanitizers which need
623 // more precise lifetime marks. However when inside an "await.suspend"
624 // block, we should always avoid conditional cleanup because it creates
625 // boolean marker that lives across await_suspend, which can destroy coro
626 // frame.
627 ConditionalEvaluation *OldConditional = nullptr;
628 CGBuilderTy::InsertPoint OldIP;
630 ((!SanOpts.has(SanitizerKind::HWAddress) &&
631 !SanOpts.has(SanitizerKind::Memory) &&
632 !SanOpts.has(SanitizerKind::MemtagStack) &&
633 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
634 inSuspendBlock())) {
635 OldConditional = OutermostConditional;
636 OutermostConditional = nullptr;
637
638 OldIP = Builder.saveIP();
639 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
640 Builder.restoreIP(CGBuilderTy::InsertPoint(
641 Block, llvm::BasicBlock::iterator(Block->back())));
642 }
643
644 if (EmitLifetimeStart(Alloca.getPointer())) {
646 }
647
648 if (OldConditional) {
649 OutermostConditional = OldConditional;
650 Builder.restoreIP(OldIP);
651 }
652 break;
653 }
654
655 default:
656 break;
657 }
658 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
659 }
660 pushTemporaryCleanup(*this, M, E, Object);
661
662 // Perform derived-to-base casts and/or field accesses, to get from the
663 // temporary object we created (and, potentially, for which we extended
664 // the lifetime) to the subobject we're binding the reference to.
665 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
666 switch (Adjustment.Kind) {
668 Object =
669 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
670 Adjustment.DerivedToBase.BasePath->path_begin(),
671 Adjustment.DerivedToBase.BasePath->path_end(),
672 /*NullCheckValue=*/ false, E->getExprLoc());
673 break;
674
677 LV = EmitLValueForField(LV, Adjustment.Field);
678 assert(LV.isSimple() &&
679 "materialized temporary field is not a simple lvalue");
680 Object = LV.getAddress();
681 break;
682 }
683
685 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
687 E, Object, Ptr, Adjustment.Ptr.MPT, /*IsInBounds=*/true);
688 break;
689 }
690 }
691 }
692
694}
695
/// Emits the address for binding a reference to expression E.
// NOTE(review): original lines 697 (the rest of the signature), 703 (the
// condition guarding the sanitizer block — presumably a
// sanitizePerformTypeCheck() test; confirm), and 710 (the type-check call
// closed by `}` below) were elided by the extraction.
696RValue
698 // Emit the expression as an lvalue.
699 LValue LV = EmitLValue(E);
700 assert(LV.isSimple());
701 llvm::Value *Value = LV.getPointer(*this);
702
704 // C++11 [dcl.ref]p5 (as amended by core issue 453):
705 // If a glvalue to which a reference is directly bound designates neither
706 // an existing object or function of an appropriate type nor a region of
707 // storage of suitable size and alignment to contain an object of the
708 // reference's type, the behavior is undefined.
709 QualType Ty = E->getType();
711 }
712
713 return RValue::get(Value);
714}
715
716
717/// getAccessedFieldNo - Given an encoded value and a result number, return the
718/// input field number being accessed.
// NOTE(review): original line 719 (the signature introducing `Idx`) was
// elided by the extraction. Reads element Idx of the constant aggregate and
// returns it as an unsigned integer.
720 const llvm::Constant *Elts) {
721 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
722 ->getZExtValue();
723}
724
725static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
726 llvm::Value *Ptr) {
727 llvm::Value *A0 =
728 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
729 llvm::Value *A1 =
730 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
731 return Builder.CreateXor(Acc, A1);
732}
733
738
// NOTE(review): original lines 734-737, 739-740, and 743-744 were elided by
// the extraction; this is the surviving tail of a predicate over a dynamic
// C++ record and a TypeCheckKind (presumably deciding whether a vptr check
// is required — confirm against upstream).
741 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
742 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
745}
746
// NOTE(review): original line 747 (the function signature) was elided by the
// extraction. Returns true if any of the sanitizers that drive pointer
// type checks (null, alignment, object-size, vptr) is enabled.
748 return SanOpts.has(SanitizerKind::Null) ||
749 SanOpts.has(SanitizerKind::Alignment) ||
750 SanOpts.has(SanitizerKind::ObjectSize) ||
751 SanOpts.has(SanitizerKind::Vptr);
752}
753
755 llvm::Value *Ptr, QualType Ty,
756 CharUnits Alignment,
757 SanitizerSet SkippedChecks,
758 llvm::Value *ArraySize) {
760 return;
761
762 // Don't check pointers outside the default address space. The null check
763 // isn't correct, the object-size check isn't supported by LLVM, and we can't
764 // communicate the addresses to the runtime handler for the vptr check.
765 if (Ptr->getType()->getPointerAddressSpace())
766 return;
767
768 // Don't check pointers to volatile data. The behavior here is implementation-
769 // defined.
770 if (Ty.isVolatileQualified())
771 return;
772
773 // Quickly determine whether we have a pointer to an alloca. It's possible
774 // to skip null checks, and some alignment checks, for these pointers. This
775 // can reduce compile-time significantly.
776 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
777
778 llvm::Value *IsNonNull = nullptr;
779 bool IsGuaranteedNonNull =
780 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
781
782 llvm::BasicBlock *Done = nullptr;
783 bool DoneViaNullSanitize = false;
784
785 {
786 auto CheckHandler = SanitizerHandler::TypeMismatch;
787 SanitizerDebugLocation SanScope(this,
788 {SanitizerKind::SO_Null,
789 SanitizerKind::SO_ObjectSize,
790 SanitizerKind::SO_Alignment},
791 CheckHandler);
792
794 Checks;
795
796 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
797 bool AllowNullPointers = isNullPointerAllowed(TCK);
798 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
799 !IsGuaranteedNonNull) {
800 // The glvalue must not be an empty glvalue.
801 IsNonNull = Builder.CreateIsNotNull(Ptr);
802
803 // The IR builder can constant-fold the null check if the pointer points
804 // to a constant.
805 IsGuaranteedNonNull = IsNonNull == True;
806
807 // Skip the null check if the pointer is known to be non-null.
808 if (!IsGuaranteedNonNull) {
809 if (AllowNullPointers) {
810 // When performing pointer casts, it's OK if the value is null.
811 // Skip the remaining checks in that case.
812 Done = createBasicBlock("null");
813 DoneViaNullSanitize = true;
814 llvm::BasicBlock *Rest = createBasicBlock("not.null");
815 Builder.CreateCondBr(IsNonNull, Rest, Done);
816 EmitBlock(Rest);
817 } else {
818 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
819 }
820 }
821 }
822
823 if (SanOpts.has(SanitizerKind::ObjectSize) &&
824 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
825 !Ty->isIncompleteType()) {
826 uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
827 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
828 if (ArraySize)
829 Size = Builder.CreateMul(Size, ArraySize);
830
831 // Degenerate case: new X[0] does not need an objectsize check.
832 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
833 if (!ConstantSize || !ConstantSize->isNullValue()) {
834 // The glvalue must refer to a large enough storage region.
835 // FIXME: If Address Sanitizer is enabled, insert dynamic
836 // instrumentation
837 // to check this.
838 // FIXME: Get object address space
839 llvm::Type *Tys[2] = {IntPtrTy, Int8PtrTy};
840 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
841 llvm::Value *Min = Builder.getFalse();
842 llvm::Value *NullIsUnknown = Builder.getFalse();
843 llvm::Value *Dynamic = Builder.getFalse();
844 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
845 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
846 Checks.push_back(
847 std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
848 }
849 }
850
851 llvm::MaybeAlign AlignVal;
852 llvm::Value *PtrAsInt = nullptr;
853
854 if (SanOpts.has(SanitizerKind::Alignment) &&
855 !SkippedChecks.has(SanitizerKind::Alignment)) {
856 AlignVal = Alignment.getAsMaybeAlign();
857 if (!Ty->isIncompleteType() && !AlignVal)
858 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
859 /*ForPointeeType=*/true)
860 .getAsMaybeAlign();
861
862 // The glvalue must be suitably aligned.
863 if (AlignVal && *AlignVal > llvm::Align(1) &&
864 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
865 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
866 llvm::Value *Align = Builder.CreateAnd(
867 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
868 llvm::Value *Aligned =
869 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
870 if (Aligned != True)
871 Checks.push_back(
872 std::make_pair(Aligned, SanitizerKind::SO_Alignment));
873 }
874 }
875
876 if (Checks.size() > 0) {
877 llvm::Constant *StaticData[] = {
879 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
880 llvm::ConstantInt::get(Int8Ty, TCK)};
881 EmitCheck(Checks, CheckHandler, StaticData, PtrAsInt ? PtrAsInt : Ptr);
882 }
883 }
884
885 // If possible, check that the vptr indicates that there is a subobject of
886 // type Ty at offset zero within this object.
887 //
888 // C++11 [basic.life]p5,6:
889 // [For storage which does not refer to an object within its lifetime]
890 // The program has undefined behavior if:
891 // -- the [pointer or glvalue] is used to access a non-static data member
892 // or call a non-static member function
893 if (SanOpts.has(SanitizerKind::Vptr) &&
894 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
895 SanitizerDebugLocation SanScope(this, {SanitizerKind::SO_Vptr},
896 SanitizerHandler::DynamicTypeCacheMiss);
897
898 // Ensure that the pointer is non-null before loading it. If there is no
899 // compile-time guarantee, reuse the run-time null check or emit a new one.
900 if (!IsGuaranteedNonNull) {
901 if (!IsNonNull)
902 IsNonNull = Builder.CreateIsNotNull(Ptr);
903 if (!Done)
904 Done = createBasicBlock("vptr.null");
905 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
906 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
907 EmitBlock(VptrNotNull);
908 }
909
910 // Compute a deterministic hash of the mangled name of the type.
911 SmallString<64> MangledName;
912 llvm::raw_svector_ostream Out(MangledName);
913 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
914 Out);
915
916 // Contained in NoSanitizeList based on the mangled type.
917 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
918 Out.str())) {
919 // Load the vptr, and mix it with TypeHash.
920 llvm::Value *TypeHash =
921 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
922
923 llvm::Type *VPtrTy = llvm::PointerType::get(getLLVMContext(), 0);
924 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
925 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
926 Ty->getAsCXXRecordDecl(),
928 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
929
930 llvm::Value *Hash =
931 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
932 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
933
934 // Look the hash up in our cache.
935 const int CacheSize = 128;
936 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
937 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
938 "__ubsan_vptr_type_cache");
939 llvm::Value *Slot = Builder.CreateAnd(Hash,
940 llvm::ConstantInt::get(IntPtrTy,
941 CacheSize-1));
942 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
943 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
944 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
946
947 // If the hash isn't in the cache, call a runtime handler to perform the
948 // hard work of checking whether the vptr is for an object of the right
949 // type. This will either fill in the cache and return, or produce a
950 // diagnostic.
951 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
952 llvm::Constant *StaticData[] = {
955 CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
956 llvm::ConstantInt::get(Int8Ty, TCK)
957 };
958 llvm::Value *DynamicData[] = { Ptr, Hash };
959 EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
960 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
961 DynamicData);
962 }
963 }
964
965 if (Done) {
966 SanitizerDebugLocation SanScope(
967 this,
968 {DoneViaNullSanitize ? SanitizerKind::SO_Null : SanitizerKind::SO_Vptr},
969 DoneViaNullSanitize ? SanitizerHandler::TypeMismatch
970 : SanitizerHandler::DynamicTypeCacheMiss);
971 Builder.CreateBr(Done);
972 EmitBlock(Done);
973 }
974}
975
// NOTE(review): original lines 976 (the signature start introducing `E`) and
// 978 (presumably the line binding ASTContext `C`, used below — confirm)
// were elided by the extraction. Computes the element count implied by a
// pass_object_size parameter: loads the implicit size argument (in bytes)
// and divides by the element size of EltTy. Returns nullptr whenever the
// expression is not such a parameter or the size is not usable.
977 QualType EltTy) {
979 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
980 if (!EltSize)
981 return nullptr;
982
983 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
984 if (!ArrayDeclRef)
985 return nullptr;
986
987 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
988 if (!ParamDecl)
989 return nullptr;
990
991 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
992 if (!POSAttr)
993 return nullptr;
994
995 // Don't load the size if it's a lower bound.
996 int POSType = POSAttr->getType();
997 if (POSType != 0 && POSType != 1)
998 return nullptr;
999
1000 // Find the implicit size parameter.
1001 auto PassedSizeIt = SizeArguments.find(ParamDecl);
1002 if (PassedSizeIt == SizeArguments.end())
1003 return nullptr;
1004
1005 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
1006 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
1007 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
1008 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
1009 C.getSizeType(), E->getExprLoc());
1010 llvm::Value *SizeOfElement =
1011 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
1012 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
1013}
1014
1015/// If Base is known to point to the start of an array, return the length of
1016/// that array. Return 0 if the length cannot be determined.
1018 const Expr *Base,
1019 QualType &IndexedType,
1021 StrictFlexArraysLevel) {
1022 // For the vector indexing extension, the bound is the number of elements.
1023 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
1024 IndexedType = Base->getType();
1025 return CGF.Builder.getInt32(VT->getNumElements());
1026 }
1027
1028 Base = Base->IgnoreParens();
1029
1030 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1031 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
1032 !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
1033 StrictFlexArraysLevel)) {
1034 CodeGenFunction::SanitizerScope SanScope(&CGF);
1035
1036 IndexedType = CE->getSubExpr()->getType();
1037 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
1038 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
1039 return CGF.Builder.getInt(CAT->getSize());
1040
1041 if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
1042 return CGF.getVLASize(VAT).NumElts;
1043 // Ignore pass_object_size here. It's not applicable on decayed pointers.
1044 }
1045 }
1046
1047 CodeGenFunction::SanitizerScope SanScope(&CGF);
1048
1049 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
1050 if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
1051 IndexedType = Base->getType();
1052 return POS;
1053 }
1054
1055 return nullptr;
1056}
1057
1058namespace {
1059
1060/// \p StructAccessBase returns the base \p Expr of a field access. It returns
1061/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
1062///
1063/// p in p-> a.b.c
1064///
1065/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
1066/// looking for:
1067///
1068/// struct s {
1069/// struct s *ptr;
1070/// int count;
1071/// char array[] __attribute__((counted_by(count)));
1072/// };
1073///
1074/// If we have an expression like \p p->ptr->array[index], we want the
1075/// \p MemberExpr for \p p->ptr instead of \p p.
1076class StructAccessBase
1077 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
1078 const RecordDecl *ExpectedRD;
1079
1080 bool IsExpectedRecordDecl(const Expr *E) const {
1081 QualType Ty = E->getType();
1082 if (Ty->isPointerType())
1083 Ty = Ty->getPointeeType();
1084 return ExpectedRD == Ty->getAsRecordDecl();
1085 }
1086
1087public:
1088 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
1089
1090 //===--------------------------------------------------------------------===//
1091 // Visitor Methods
1092 //===--------------------------------------------------------------------===//
1093
1094 // NOTE: If we build C++ support for counted_by, then we'll have to handle
1095 // horrors like this:
1096 //
1097 // struct S {
1098 // int x, y;
1099 // int blah[] __attribute__((counted_by(x)));
1100 // } s;
1101 //
1102 // int foo(int index, int val) {
1103 // int (S::*IHatePMDs)[] = &S::blah;
1104 // (s.*IHatePMDs)[index] = val;
1105 // }
1106
1107 const Expr *Visit(const Expr *E) {
1108 return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
1109 }
1110
1111 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1112
1113 // These are the types we expect to return (in order of most to least
1114 // likely):
1115 //
1116 // 1. DeclRefExpr - This is the expression for the base of the structure.
1117 // It's exactly what we want to build an access to the \p counted_by
1118 // field.
1119 // 2. MemberExpr - This is the expression that has the same \p RecordDecl
1120 // as the flexble array member's lexical enclosing \p RecordDecl. This
1121 // allows us to catch things like: "p->p->array"
1122 // 3. CompoundLiteralExpr - This is for people who create something
1123 // heretical like (struct foo has a flexible array member):
1124 //
1125 // (struct foo){ 1, 2 }.blah[idx];
1126 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1127 return IsExpectedRecordDecl(E) ? E : nullptr;
1128 }
1129 const Expr *VisitMemberExpr(const MemberExpr *E) {
1130 if (IsExpectedRecordDecl(E) && E->isArrow())
1131 return E;
1132 const Expr *Res = Visit(E->getBase());
1133 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1134 }
1135 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1136 return IsExpectedRecordDecl(E) ? E : nullptr;
1137 }
1138 const Expr *VisitCallExpr(const CallExpr *E) {
1139 return IsExpectedRecordDecl(E) ? E : nullptr;
1140 }
1141
1142 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1143 if (IsExpectedRecordDecl(E))
1144 return E;
1145 return Visit(E->getBase());
1146 }
1147 const Expr *VisitCastExpr(const CastExpr *E) {
1148 if (E->getCastKind() == CK_LValueToRValue)
1149 return IsExpectedRecordDecl(E) ? E : nullptr;
1150 return Visit(E->getSubExpr());
1151 }
1152 const Expr *VisitParenExpr(const ParenExpr *E) {
1153 return Visit(E->getSubExpr());
1154 }
1155 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1156 return Visit(E->getSubExpr());
1157 }
1158 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1159 return Visit(E->getSubExpr());
1160 }
1161};
1162
1163} // end anonymous namespace
1164
1166
1168 const FieldDecl *Field,
1169 RecIndicesTy &Indices) {
1170 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1171 int64_t FieldNo = -1;
1172 for (const FieldDecl *FD : RD->fields()) {
1173 if (!Layout.containsFieldDecl(FD))
1174 // This could happen if the field has a struct type that's empty. I don't
1175 // know why either.
1176 continue;
1177
1178 FieldNo = Layout.getLLVMFieldNo(FD);
1179 if (FD == Field) {
1180 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1181 return true;
1182 }
1183
1184 QualType Ty = FD->getType();
1185 if (Ty->isRecordType()) {
1186 if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
1187 if (RD->isUnion())
1188 FieldNo = 0;
1189 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1190 return true;
1191 }
1192 }
1193 }
1194
1195 return false;
1196}
1197
1199 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1200 // Find the record containing the count field. Walk up through anonymous
1201 // structs/unions (which are transparent in C) but stop at named records.
1202 // Using getOuterLexicalRecordContext() here would be wrong because it walks
1203 // past named nested structs to the outermost record, causing a crash when a
1204 // struct with a counted_by FAM is defined nested inside another struct.
1205 const RecordDecl *RD = CountDecl->getParent();
1206 while (RD->isAnonymousStructOrUnion()) {
1207 const auto *Parent = dyn_cast<RecordDecl>(RD->getLexicalParent());
1208 if (!Parent)
1209 break;
1210 RD = Parent;
1211 }
1212
1213 // Find the base struct expr (i.e. p in p->a.b.c.d).
1214 const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1215 if (!StructBase || StructBase->HasSideEffects(getContext()))
1216 return nullptr;
1217
1218 llvm::Value *Res = nullptr;
1219 if (StructBase->getType()->isPointerType()) {
1220 LValueBaseInfo BaseInfo;
1221 TBAAAccessInfo TBAAInfo;
1222 Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1223 Res = Addr.emitRawPointer(*this);
1224 } else if (StructBase->isLValue()) {
1225 LValue LV = EmitLValue(StructBase);
1226 Address Addr = LV.getAddress();
1227 Res = Addr.emitRawPointer(*this);
1228 } else {
1229 return nullptr;
1230 }
1231
1232 RecIndicesTy Indices;
1233 getGEPIndicesToField(*this, RD, CountDecl, Indices);
1234 if (Indices.empty())
1235 return nullptr;
1236
1237 Indices.push_back(Builder.getInt32(0));
1238 CanQualType T = CGM.getContext().getCanonicalTagType(RD);
1239 return Builder.CreateInBoundsGEP(ConvertType(T), Res,
1240 RecIndicesTy(llvm::reverse(Indices)),
1241 "counted_by.gep");
1242}
1243
1244/// This method is typically called in contexts where we can't generate
1245/// side-effects, like in __builtin_dynamic_object_size. When finding
1246/// expressions, only choose those that have either already been emitted or can
1247/// be loaded without side-effects.
1248///
1249/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1250/// within the top-level struct.
1251/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1253 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1254 if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
1255 return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
1256 getIntAlign(), "counted_by.load");
1257 return nullptr;
1258}
1259
1261 const Expr *ArrayExprBase,
1262 llvm::Value *IndexVal, QualType IndexType,
1263 bool Accessed) {
1264 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1265 "should not be called unless adding bounds checks");
1266 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1267 getLangOpts().getStrictFlexArraysLevel();
1268 QualType ArrayExprBaseType;
1269 llvm::Value *BoundsVal = getArrayIndexingBound(
1270 *this, ArrayExprBase, ArrayExprBaseType, StrictFlexArraysLevel);
1271
1272 EmitBoundsCheckImpl(ArrayExpr, ArrayExprBaseType, IndexVal, IndexType,
1273 BoundsVal, getContext().getSizeType(), Accessed);
1274}
1275
1277 QualType ArrayBaseType,
1278 llvm::Value *IndexVal,
1279 QualType IndexType,
1280 llvm::Value *BoundsVal,
1281 QualType BoundsType, bool Accessed) {
1282 if (!BoundsVal)
1283 return;
1284
1285 auto CheckKind = SanitizerKind::SO_ArrayBounds;
1286 auto CheckHandler = SanitizerHandler::OutOfBounds;
1287 SanitizerDebugLocation SanScope(this, {CheckKind}, CheckHandler);
1288
1289 // All hail the C implicit type conversion rules!!!
1290 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1291 bool BoundsSigned = BoundsType->isSignedIntegerOrEnumerationType();
1292
1293 const ASTContext &Ctx = getContext();
1294 llvm::Type *Ty = ConvertType(
1295 Ctx.getTypeSize(IndexType) >= Ctx.getTypeSize(BoundsType) ? IndexType
1296 : BoundsType);
1297
1298 llvm::Value *IndexInst = Builder.CreateIntCast(IndexVal, Ty, IndexSigned);
1299 llvm::Value *BoundsInst = Builder.CreateIntCast(BoundsVal, Ty, false);
1300
1301 llvm::Constant *StaticData[] = {
1302 EmitCheckSourceLocation(ArrayExpr->getExprLoc()),
1303 EmitCheckTypeDescriptor(ArrayBaseType),
1304 EmitCheckTypeDescriptor(IndexType),
1305 };
1306
1307 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexInst, BoundsInst)
1308 : Builder.CreateICmpULE(IndexInst, BoundsInst);
1309
1310 if (BoundsSigned) {
1311 // Don't allow a negative bounds.
1312 llvm::Value *Cmp = Builder.CreateICmpSGT(
1313 BoundsVal, llvm::ConstantInt::get(BoundsVal->getType(), 0));
1314 Check = Builder.CreateAnd(Cmp, Check);
1315 }
1316
1317 EmitCheck(std::make_pair(Check, CheckKind), CheckHandler, StaticData,
1318 IndexInst);
1319}
1320
1322 auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, getContext());
1323 if (!ATMD)
1324 return nullptr;
1325
1326 llvm::MDBuilder MDB(getLLVMContext());
1327 auto *TypeNameMD = MDB.createString(ATMD->TypeName);
1328 auto *ContainsPtrC = Builder.getInt1(ATMD->ContainsPointer);
1329 auto *ContainsPtrMD = MDB.createConstant(ContainsPtrC);
1330
1331 // Format: !{<type-name>, <contains-pointer>}
1332 return llvm::MDNode::get(CGM.getLLVMContext(), {TypeNameMD, ContainsPtrMD});
1333}
1334
1335void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, QualType AllocType) {
1336 assert(SanOpts.has(SanitizerKind::AllocToken) &&
1337 "Only needed with -fsanitize=alloc-token");
1338 CB->setMetadata(llvm::LLVMContext::MD_alloc_token,
1339 buildAllocToken(AllocType));
1340}
1341
// NOTE(review): the opening lines of this buildAllocToken(const CallExpr *)
// overload (its signature and the computation of AllocType from the call
// expression) were lost during extraction -- restore them from upstream
// CGExpr.cpp before relying on this fragment.
 1344 if (!AllocType.isNull())
 1345 return buildAllocToken(AllocType);
 1346 return nullptr;
 1347}
1348
1349void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, const CallExpr *E) {
1350 assert(SanOpts.has(SanitizerKind::AllocToken) &&
1351 "Only needed with -fsanitize=alloc-token");
1352 if (llvm::MDNode *MDN = buildAllocToken(E))
1353 CB->setMetadata(llvm::LLVMContext::MD_alloc_token, MDN);
1354}
1355
1358 bool isInc, bool isPre) {
1359 ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1360
1361 llvm::Value *NextVal;
1362 if (isa<llvm::IntegerType>(InVal.first->getType())) {
1363 uint64_t AmountVal = isInc ? 1 : -1;
1364 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1365
1366 // Add the inc/dec to the real part.
1367 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1368 } else {
1369 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1370 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1371 if (!isInc)
1372 FVal.changeSign();
1373 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1374
1375 // Add the inc/dec to the real part.
1376 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1377 }
1378
1379 ComplexPairTy IncVal(NextVal, InVal.second);
1380
1381 // Store the updated result through the lvalue.
1382 EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1383 if (getLangOpts().OpenMP)
1384 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1385 E->getSubExpr());
1386
1387 // If this is a postinc, return the value read from memory, otherwise use the
1388 // updated value.
1389 return isPre ? IncVal : InVal;
1390}
1391
1393 CodeGenFunction *CGF) {
1394 // Bind VLAs in the cast type.
1395 if (CGF && E->getType()->isVariablyModifiedType())
1397
1398 if (CGDebugInfo *DI = getModuleDebugInfo())
1399 DI->EmitExplicitCastType(E->getType());
1400}
1401
1402//===----------------------------------------------------------------------===//
1403// LValue Expression Emission
1404//===----------------------------------------------------------------------===//
1405
1406static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx,
1407 CharUnits eltSize) {
1408 // If we have a constant index, we can use the exact offset of the
1409 // element we're accessing.
1410 if (auto *constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
1411 CharUnits offset = constantIdx->getZExtValue() * eltSize;
1412 return arrayAlign.alignmentAtOffset(offset);
1413 }
1414
1415 // Otherwise, use the worst-case alignment for any element.
1416 return arrayAlign.alignmentOfArrayElement(eltSize);
1417}
1418
1419/// Emit pointer + index arithmetic.
// NOTE(review): extraction dropped this helper's opening line (originally
// "static Address emitPointerArithmetic(CodeGenFunction &CGF," at line 1420)
// and several continuation lines below (1438, 1441, 1450, 1453) -- confirm
// against upstream CGExpr.cpp before compiling.
1421 const BinaryOperator *BO,
1422 LValueBaseInfo *BaseInfo,
1423 TBAAAccessInfo *TBAAInfo,
1424 KnownNonNull_t IsKnownNonNull) {
1425 assert(BO->isAdditiveOp() && "Expect an addition or subtraction.");
1426 Expr *pointerOperand = BO->getLHS();
1427 Expr *indexOperand = BO->getRHS();
1428 bool isSubtraction = BO->getOpcode() == BO_Sub;
1429
1430 Address BaseAddr = Address::invalid();
1431 llvm::Value *index = nullptr;
1432 // In a subtraction, the LHS is always the pointer.
1433 // Note: do not change the evaluation order.
1434 if (!isSubtraction && !pointerOperand->getType()->isAnyPointerType()) {
1435 std::swap(pointerOperand, indexOperand);
1436 index = CGF.EmitScalarExpr(indexOperand);
1437 BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
1439 } else {
1440 BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
1442 index = CGF.EmitScalarExpr(indexOperand);
1443 }
1444
1445 llvm::Value *pointer = BaseAddr.getBasePointer();
1446 llvm::Value *Res = CGF.EmitPointerArithmetic(
1447 BO, pointerOperand, pointer, indexOperand, index, isSubtraction);
1448 QualType PointeeTy = BO->getType()->getPointeeType();
1449 CharUnits Align =
1451 CGF.getContext().getTypeSizeInChars(PointeeTy));
1452 return Address(Res, CGF.ConvertTypeForMem(PointeeTy), Align,
1454 /*Offset=*/nullptr, IsKnownNonNull);
1455}
1456
// NOTE(review): this is the file-local (static) EmitPointerWithAlignment
// worker. Extraction dropped many fully-hyperlinked lines, including its
// opening line at 1457 and several call/continuation lines (1463, 1482, 1490,
// 1509, 1514, 1517, 1537, 1589-1590) -- restore from upstream CGExpr.cpp
// before compiling.
1458 TBAAAccessInfo *TBAAInfo,
1459 KnownNonNull_t IsKnownNonNull,
1460 CodeGenFunction &CGF) {
1461 // We allow this with ObjC object pointers because of fragile ABIs.
1462 assert(E->getType()->isPointerType() ||
1464 E = E->IgnoreParens();
1465
1466 // Casts:
1467 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1468 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1469 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1470
1471 switch (CE->getCastKind()) {
1472 // Non-converting casts (but not C's implicit conversion from void*).
1473 case CK_BitCast:
1474 case CK_NoOp:
1475 case CK_AddressSpaceConversion:
1476 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1477 if (PtrTy->getPointeeType()->isVoidType())
1478 break;
1479
1480 LValueBaseInfo InnerBaseInfo;
1481 TBAAAccessInfo InnerTBAAInfo;
// NOTE(review): the recursive call producing `Addr` (line 1482) was dropped.
1483 CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1484 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1485 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1486
1487 if (isa<ExplicitCastExpr>(CE)) {
1488 LValueBaseInfo TargetTypeBaseInfo;
1489 TBAAAccessInfo TargetTypeTBAAInfo;
1491 E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1492 if (TBAAInfo)
1493 *TBAAInfo =
1494 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1495 // If the source l-value is opaque, honor the alignment of the
1496 // casted-to type.
1497 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1498 if (BaseInfo)
1499 BaseInfo->mergeForCast(TargetTypeBaseInfo);
1500 Addr.setAlignment(Align);
1501 }
1502 }
1503
1504 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1505 CE->getCastKind() == CK_BitCast) {
1506 if (auto PT = E->getType()->getAs<PointerType>())
1507 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1508 /*MayBeNull=*/true,
1510 CE->getBeginLoc());
1511 }
1512
1513 llvm::Type *ElemTy =
1515 Addr = Addr.withElementType(ElemTy);
1516 if (CE->getCastKind() == CK_AddressSpaceConversion)
1518 Addr, CGF.ConvertType(E->getType()), ElemTy);
1519
1520 return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
1521 CE->getType());
1522 }
1523 break;
1524
1525 // Array-to-pointer decay.
1526 case CK_ArrayToPointerDecay:
1527 return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1528
1529 // Derived-to-base conversions.
1530 case CK_UncheckedDerivedToBase:
1531 case CK_DerivedToBase: {
1532 // TODO: Support accesses to members of base classes in TBAA. For now, we
1533 // conservatively pretend that the complete object is of the base class
1534 // type.
1535 if (TBAAInfo)
1536 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1538 CE->getSubExpr(), BaseInfo, nullptr,
1539 (KnownNonNull_t)(IsKnownNonNull ||
1540 CE->getCastKind() == CK_UncheckedDerivedToBase));
1541 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1542 return CGF.GetAddressOfBaseClass(
1543 Addr, Derived, CE->path_begin(), CE->path_end(),
1544 CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1545 }
1546
1547 // TODO: Is there any reason to treat base-to-derived conversions
1548 // specially?
1549 default:
1550 break;
1551 }
1552 }
1553
1554 // Unary &.
1555 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1556 if (UO->getOpcode() == UO_AddrOf) {
1557 LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1558 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1559 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1560 return LV.getAddress();
1561 }
1562 }
1563
1564 // std::addressof and variants.
1565 if (auto *Call = dyn_cast<CallExpr>(E)) {
1566 switch (Call->getBuiltinCallee()) {
1567 default:
1568 break;
1569 case Builtin::BIaddressof:
1570 case Builtin::BI__addressof:
1571 case Builtin::BI__builtin_addressof: {
1572 LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1573 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1574 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1575 return LV.getAddress();
1576 }
1577 }
1578 }
1579
1580 // Pointer arithmetic: pointer +/- index.
1581 if (auto *BO = dyn_cast<BinaryOperator>(E)) {
1582 if (BO->isAdditiveOp())
1583 return emitPointerArithmetic(CGF, BO, BaseInfo, TBAAInfo, IsKnownNonNull);
1584 }
1585
1586 // TODO: conditional operators, comma.
1587
1588 // Otherwise, use the alignment of the type.
1591 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1592}
1593
1594/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1595/// derive a more accurate bound on the alignment of the pointer.
1597 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1598 KnownNonNull_t IsKnownNonNull) {
1599 Address Addr =
1600 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1601 if (IsKnownNonNull && !Addr.isKnownNonNull())
1602 Addr.setKnownNonNull();
1603 return Addr;
1604}
1605
1607 llvm::Value *V = RV.getScalarVal();
1608 if (auto MPT = T->getAs<MemberPointerType>())
1609 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1610 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1611}
1612
1614 if (Ty->isVoidType())
1615 return RValue::get(nullptr);
1616
1617 switch (getEvaluationKind(Ty)) {
1618 case TEK_Complex: {
1619 llvm::Type *EltTy =
1621 llvm::Value *U = llvm::UndefValue::get(EltTy);
1622 return RValue::getComplex(std::make_pair(U, U));
1623 }
1624
1625 // If this is a use of an undefined aggregate type, the aggregate must have an
1626 // identifiable address. Just because the contents of the value are undefined
1627 // doesn't mean that the address can't be taken and compared.
1628 case TEK_Aggregate: {
1629 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1630 return RValue::getAggregate(DestPtr);
1631 }
1632
1633 case TEK_Scalar:
1634 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1635 }
1636 llvm_unreachable("bad evaluation kind");
1637}
1638
1640 const char *Name) {
1641 ErrorUnsupported(E, Name);
1642 return GetUndefRValue(E->getType());
1643}
1644
1646 const char *Name) {
1647 ErrorUnsupported(E, Name);
1648 llvm::Type *ElTy = ConvertType(E->getType());
1649 llvm::Type *Ty = DefaultPtrTy;
1650 return MakeAddrLValue(
1651 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1652}
1653
1655 const Expr *Base = Obj;
1656 while (!isa<CXXThisExpr>(Base)) {
1657 // The result of a dynamic_cast can be null.
1659 return false;
1660
1661 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1662 Base = CE->getSubExpr();
1663 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1664 Base = PE->getSubExpr();
1665 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1666 if (UO->getOpcode() == UO_Extension)
1667 Base = UO->getSubExpr();
1668 else
1669 return false;
1670 } else {
1671 return false;
1672 }
1673 }
1674 return true;
1675}
1676
1678 LValue LV;
1679 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1680 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1681 else
1682 LV = EmitLValue(E);
1683 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1684 SanitizerSet SkippedChecks;
1685 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1686 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1687 if (IsBaseCXXThis)
1688 SkippedChecks.set(SanitizerKind::Alignment, true);
1689 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1690 SkippedChecks.set(SanitizerKind::Null, true);
1691 }
1692 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1693 }
1694 return LV;
1695}
1696
1697/// EmitLValue - Emit code to compute a designator that specifies the location
1698/// of the expression.
1699///
1700/// This can return one of two things: a simple address or a bitfield reference.
1701/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1702/// an LLVM pointer type.
1703///
1704/// If this returns a bitfield reference, nothing about the pointee type of the
1705/// LLVM value is known: For example, it may not be a pointer to an integer.
1706///
1707/// If this returns a normal address, and if the lvalue's C type is fixed size,
1708/// this method guarantees that the returned pointer type will point to an LLVM
1709/// type of the same size of the lvalue's type. If the lvalue has a variable
1710/// length type, this is not possible.
1711///
1713 KnownNonNull_t IsKnownNonNull) {
1714 // Running with sufficient stack space to avoid deeply nested expressions
1715 // cause a stack overflow.
1716 LValue LV;
1717 CGM.runWithSufficientStackSpace(
1718 E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });
1719
1720 if (IsKnownNonNull && !LV.isKnownNonNull())
1721 LV.setKnownNonNull();
1722 return LV;
1723}
1724
// NOTE(review): many single-line "return Emit...LValue(...)" bodies in this
// switch were dropped during extraction (every original line rendered
// entirely as hyperlinks is missing, e.g. 1735, 1737, 1739, 1745-1746, 1752,
// 1757, 1759, 1763, and most emitter calls after 1820) -- restore them from
// upstream CGExpr.cpp before compiling.
1725LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1726 KnownNonNull_t IsKnownNonNull) {
1727 ApplyDebugLocation DL(*this, E);
1728 switch (E->getStmtClass()) {
1729 default: return EmitUnsupportedLValue(E, "l-value expression");
1730
1731 case Expr::ObjCPropertyRefExprClass:
1732 llvm_unreachable("cannot emit a property reference directly");
1733
1734 case Expr::ObjCSelectorExprClass:
1736 case Expr::ObjCIsaExprClass:
1738 case Expr::BinaryOperatorClass:
1740 case Expr::CompoundAssignOperatorClass: {
1741 QualType Ty = E->getType();
1742 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1743 Ty = AT->getValueType();
1744 if (!Ty->isAnyComplexType())
1747 }
1748 case Expr::CallExprClass:
1749 case Expr::CXXMemberCallExprClass:
1750 case Expr::CXXOperatorCallExprClass:
1751 case Expr::UserDefinedLiteralClass:
1753 case Expr::CXXRewrittenBinaryOperatorClass:
1754 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1755 IsKnownNonNull);
1756 case Expr::VAArgExprClass:
1758 case Expr::DeclRefExprClass:
1760 case Expr::ConstantExprClass: {
1761 const ConstantExpr *CE = cast<ConstantExpr>(E);
1762 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE))
1764 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1765 }
1766 case Expr::ParenExprClass:
1767 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1768 case Expr::GenericSelectionExprClass:
1769 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1770 IsKnownNonNull);
1771 case Expr::PredefinedExprClass:
1773 case Expr::StringLiteralClass:
1775 case Expr::ObjCEncodeExprClass:
1777 case Expr::PseudoObjectExprClass:
1779 case Expr::InitListExprClass:
1781 case Expr::CXXTemporaryObjectExprClass:
1782 case Expr::CXXConstructExprClass:
1784 case Expr::CXXBindTemporaryExprClass:
1786 case Expr::CXXUuidofExprClass:
1788 case Expr::LambdaExprClass:
1789 return EmitAggExprToLValue(E);
1790
1791 case Expr::ExprWithCleanupsClass: {
1792 const auto *cleanups = cast<ExprWithCleanups>(E);
1793 RunCleanupsScope Scope(*this);
1794 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1795 if (LV.isSimple()) {
1796 // Defend against branches out of gnu statement expressions surrounded by
1797 // cleanups.
1798 Address Addr = LV.getAddress();
1799 llvm::Value *V = Addr.getBasePointer();
1800 Scope.ForceCleanup({&V});
1801 Addr.replaceBasePointer(V);
1802 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1803 LV.getBaseInfo(), LV.getTBAAInfo());
1804 }
1805 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1806 // bitfield lvalue or some other non-simple lvalue?
1807 return LV;
1808 }
1809
1810 case Expr::CXXDefaultArgExprClass: {
1811 auto *DAE = cast<CXXDefaultArgExpr>(E);
1812 CXXDefaultArgExprScope Scope(*this, DAE);
1813 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1814 }
1815 case Expr::CXXDefaultInitExprClass: {
1816 auto *DIE = cast<CXXDefaultInitExpr>(E);
1817 CXXDefaultInitExprScope Scope(*this, DIE);
1818 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1819 }
1820 case Expr::CXXTypeidExprClass:
1822
1823 case Expr::ObjCMessageExprClass:
1825 case Expr::ObjCIvarRefExprClass:
1827 case Expr::StmtExprClass:
1829 case Expr::UnaryOperatorClass:
1831 case Expr::ArraySubscriptExprClass:
1833 case Expr::MatrixSingleSubscriptExprClass:
1835 case Expr::MatrixSubscriptExprClass:
1837 case Expr::ArraySectionExprClass:
1839 case Expr::ExtVectorElementExprClass:
1841 case Expr::MatrixElementExprClass:
1843 case Expr::CXXThisExprClass:
1845 case Expr::MemberExprClass:
1847 case Expr::CompoundLiteralExprClass:
1849 case Expr::ConditionalOperatorClass:
1851 case Expr::BinaryConditionalOperatorClass:
1853 case Expr::ChooseExprClass:
1854 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1855 case Expr::OpaqueValueExprClass:
1857 case Expr::SubstNonTypeTemplateParmExprClass:
1858 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1859 IsKnownNonNull);
1860 case Expr::ImplicitCastExprClass:
1861 case Expr::CStyleCastExprClass:
1862 case Expr::CXXFunctionalCastExprClass:
1863 case Expr::CXXStaticCastExprClass:
1864 case Expr::CXXDynamicCastExprClass:
1865 case Expr::CXXReinterpretCastExprClass:
1866 case Expr::CXXConstCastExprClass:
1867 case Expr::CXXAddrspaceCastExprClass:
1868 case Expr::ObjCBridgedCastExprClass:
1869 return EmitCastLValue(cast<CastExpr>(E));
1870
1871 case Expr::MaterializeTemporaryExprClass:
1873
1874 case Expr::CoawaitExprClass:
1876 case Expr::CoyieldExprClass:
1878 case Expr::PackIndexingExprClass:
1879 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1880 case Expr::HLSLOutArgExprClass:
1881 llvm_unreachable("cannot emit a HLSL out argument directly");
1882 }
1883}
1884
1885/// Given an object of the given canonical type, can we safely copy a
1886/// value out of it based on its initializer?
1888 assert(type.isCanonical());
1889 assert(!type->isReferenceType());
1890
1891 // Must be const-qualified but non-volatile.
1892 Qualifiers qs = type.getLocalQualifiers();
1893 if (!qs.hasConst() || qs.hasVolatile()) return false;
1894
1895 // Otherwise, all object types satisfy this except C++ classes with
1896 // mutable subobjects or non-trivial copy/destroy behavior.
1897 if (const auto *RT = dyn_cast<RecordType>(type))
1898 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
1899 RD = RD->getDefinitionOrSelf();
1900 if (RD->hasMutableFields() || !RD->isTrivial())
1901 return false;
1902 }
1903
1904 return true;
1905}
1906
1907/// Can we constant-emit a load of a reference to a variable of the
1908/// given type? This is different from predicates like
1909/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1910/// in situations that don't necessarily satisfy the language's rules
1911/// for this (e.g. C++'s ODR-use rules). For example, we want to able
1912/// to do this with const float variables even if those variables
1913/// aren't marked 'constexpr'.
1921 type = type.getCanonicalType();
1922 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1923 if (isConstantEmittableObjectType(ref->getPointeeType()))
1925 return CEK_AsReferenceOnly;
1926 }
1928 return CEK_AsValueOnly;
1929 return CEK_None;
1930}
1931
1932/// Try to emit a reference to the given value without producing it as
1933/// an l-value. This is just an optimization, but it avoids us needing
1934/// to emit global copies of variables if they're named without triggering
1935/// a formal use in a context where we can't emit a direct reference to them,
1936/// for instance if a block or lambda or a member of a local class uses a
1937/// const int variable or constexpr variable from an enclosing function.
// NOTE(review): the signature (source lines 1938-1939), the declaration of
// CEK (1943) and a few interior lines (1988, 2012, 2018, 2020) are elided
// in this listing; the remaining code is preserved verbatim.
1940  const ValueDecl *Value = RefExpr->getDecl();
1941
1942  // The value needs to be an enum constant or a constant variable.
1944  if (isa<ParmVarDecl>(Value)) {
1945    CEK = CEK_None;
1946  } else if (const auto *var = dyn_cast<VarDecl>(Value)) {
1947    CEK = checkVarTypeForConstantEmission(var->getType());
1948  } else if (isa<EnumConstantDecl>(Value)) {
1949    CEK = CEK_AsValueOnly;
1950  } else {
1951    CEK = CEK_None;
1952  }
1953  if (CEK == CEK_None) return ConstantEmission();
1954
1955  Expr::EvalResult result;
1956  bool resultIsReference;
1957  QualType resultType;
1958
1959  // It's best to evaluate all the way as an r-value if that's permitted.
1960  if (CEK != CEK_AsReferenceOnly &&
1961      RefExpr->EvaluateAsRValue(result, getContext())) {
1962    resultIsReference = false;
1963    resultType = RefExpr->getType().getUnqualifiedType();
1964
1965  // Otherwise, try to evaluate as an l-value.
1966  } else if (CEK != CEK_AsValueOnly &&
1967             RefExpr->EvaluateAsLValue(result, getContext())) {
1968    resultIsReference = true;
1969    resultType = Value->getType();
1970
1971  // Failure.
1972  } else {
1973    return ConstantEmission();
1974  }
1975
1976  // In any case, if the initializer has side-effects, abandon ship.
1977  if (result.HasSideEffects)
1978    return ConstantEmission();
1979
1980  // In CUDA/HIP device compilation, a lambda may capture a reference variable
1981  // referencing a global host variable by copy. In this case the lambda should
1982  // make a copy of the value of the global host variable. The DRE of the
1983  // captured reference variable cannot be emitted as load from the host
1984  // global variable as compile time constant, since the host variable is not
1985  // accessible on device. The DRE of the captured reference variable has to be
1986  // loaded from captures.
1987  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1989    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1990    if (isLambdaMethod(MD) && MD->getOverloadedOperator() == OO_Call) {
1991      const APValue::LValueBase &base = result.Val.getLValueBase();
1992      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1993        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
// Bail out unless the referenced variable is itself device-accessible.
1994          if (!VD->hasAttr<CUDADeviceAttr>()) {
1995            return ConstantEmission();
1996          }
1997        }
1998      }
1999    }
2000  }
2001
2002  // Emit as a constant.
2003  llvm::Constant *C = ConstantEmitter(*this).emitAbstract(
2004      RefExpr->getLocation(), result.Val, resultType);
2005
2006  // Make sure we emit a debug reference to the global variable.
2007  // This should probably fire even for
2008  if (isa<VarDecl>(Value)) {
// Only emit a debug value when the variable itself won't be emitted.
2009    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(Value)))
2010      EmitDeclRefExprDbgValue(RefExpr, result.Val);
2011  } else {
2013    EmitDeclRefExprDbgValue(RefExpr, result.Val);
2014  }
2015
2016  // If we emitted a reference constant, we need to dereference that.
2017  if (resultIsReference)
2019
2021}
2022
// Rewrite a MemberExpr that names a variable member (per the check below,
// a VarDecl — i.e. a static data member) into an equivalent DeclRefExpr so
// it can be constant-emitted through the DRE path; returns null otherwise.
// NOTE(review): the first signature line (source line 2023) and the leading
// DeclRefExpr::Create arguments (line 2028) are elided in this listing.
2024                                                      const MemberExpr *ME) {
2025  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
2026    // Try to emit static variable member expressions as DREs.
2027    return DeclRefExpr::Create(
2029        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
2030        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
2031  }
2032  return nullptr;
2033}
2034
// MemberExpr overload of tryEmitAsConstant: presumably converts the member
// expression to a DeclRefExpr and forwards to the DRE overload, returning an
// empty ConstantEmission on failure — TODO confirm against upstream; the
// signature and conversion call (source lines 2034-2037) are elided here.
2038    return tryEmitAsConstant(DRE);
2039  return ConstantEmission();
2040}
2041
// Materialize an already-computed ConstantEmission as a scalar llvm::Value:
// reference-kind constants are converted to an lvalue and loaded; value-kind
// constants are returned directly.
// NOTE(review): the first signature line (source line 2042) is elided.
2043    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
2044  assert(Constant && "not a constant");
2045  if (Constant.isReference())
2046    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
2047                            E->getExprLoc())
2048        .getScalarVal();
2049  return Constant.getValue();
2050}
2051
// LValue convenience overload: unpack the lvalue's address, volatility, type,
// base info, TBAA info and nontemporal flag, then defer to the main
// EmitLoadOfScalar overload.
// NOTE(review): the first signature line (source line 2052) is elided.
2053                                               SourceLocation Loc) {
2054  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
2055                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
2056                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
2057}
2058
2059// This method SHOULD NOT be extended to support additional types, like BitInt
2060// types, without an opt-in bool controlled by a CodeGenOptions setting (like
2061// -fstrict-bool) and a new UBSan check (like SanitizerKind::Bool) as breaking
2062// that assumption would lead to memory corruption. See link for examples of how
2063// having a bool that has a value different from 0 or 1 in memory can lead to
2064// memory corruption.
2065// https://discourse.llvm.org/t/defining-what-happens-when-a-bool-isn-t-0-or-1/86778
2066static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min,
2067 llvm::APInt &End, bool StrictEnums, bool StrictBool,
2068 bool IsBool) {
2069 const auto *ED = Ty->getAsEnumDecl();
2070 bool IsRegularCPlusPlusEnum =
2071 CGF.getLangOpts().CPlusPlus && StrictEnums && ED && !ED->isFixed();
2072 if (!IsBool && !IsRegularCPlusPlusEnum)
2073 return false;
2074
2075 if (IsBool) {
2076 if (!StrictBool)
2077 return false;
2078 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
2079 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
2080 } else {
2081 ED->getValueRange(End, Min);
2082 }
2083 return true;
2084}
2085
// Build an MDNode describing the valid in-memory value range for loads of
// type \p Ty, or return null when no range applies. Bools participate only
// when the load-bool-from-mem option requests the strict mode; enums go
// through getRangeForType's -fstrict-enums logic.
// NOTE(review): source line 2090 (the enumerator that getLoadBoolFromMem()
// is compared against) is elided in this listing.
2086llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
2087  llvm::APInt Min, End;
// Exclude vector types: boolean vectors use integer storage, not a ranged
// scalar (see the packed-bool handling in EmitLoadOfScalar).
2088  bool IsBool = Ty->hasBooleanRepresentation() && !Ty->isVectorType();
2089  bool StrictBoolEnabled = CGM.getCodeGenOpts().getLoadBoolFromMem() ==
2091  if (!getRangeForType(*this, Ty, Min, End,
2092                       /*StrictEnums=*/CGM.getCodeGenOpts().StrictEnums,
2093                       /*StrictBool=*/StrictBoolEnabled, /*IsBool=*/IsBool))
2094    return nullptr;
2095
2096  llvm::MDBuilder MDHelper(getLLVMContext());
2097  return MDHelper.createRange(Min, End);
2098}
2099
2101 SourceLocation Loc) {
2102 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2103 // In order to prevent the optimizer from throwing away the check, don't
2104 // attach range metadata to the load.
2105 } else if (CGM.getCodeGenOpts().isOptimizedBuild()) {
2106 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2107 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2108 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2109 llvm::MDNode::get(CGM.getLLVMContext(), {}));
2110 }
2111 }
2112}
2113
// Emit a UBSan bool/enum load check for \p Value when the sanitizers are
// enabled and \p Ty has a restricted value range; returns true when a check
// was (or would have been) emitted, false when no checking applies.
// NOTE(review): the first signature line (source line 2114), the declaration
// of Kind (2144) and the second static argument (2162) are elided in this
// listing; the remaining code is preserved verbatim.
2115                                           SourceLocation Loc) {
2116  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
2117  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
2118  if (!HasBoolCheck && !HasEnumCheck)
2119    return false;
2120
2121  bool IsBool = (Ty->hasBooleanRepresentation() && !Ty->isVectorType()) ||
2122                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
2123  bool NeedsBoolCheck = HasBoolCheck && IsBool;
2124  bool NeedsEnumCheck = HasEnumCheck && Ty->isEnumeralType();
2125  if (!NeedsBoolCheck && !NeedsEnumCheck)
2126    return false;
2127
2128  // Single-bit booleans don't need to be checked. Special-case this to avoid
2129  // a bit width mismatch when handling bitfield values. This is handled by
2130  // EmitFromMemory for the non-bitfield case.
2131  if (IsBool &&
2132      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
2133    return false;
2134
// Respect per-type sanitizer suppression for the enum check.
2135  if (NeedsEnumCheck &&
2136      getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
2137    return false;
2138
2139  llvm::APInt Min, End;
// If no range can be computed, report "checked" so the caller does not also
// attach range metadata.
2140  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true,
2141                       /*StrictBool=*/true, IsBool))
2142    return true;
2143
2145      NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;
2146
2147  auto &Ctx = getLLVMContext();
2148  auto CheckHandler = SanitizerHandler::LoadInvalidValue;
2149  SanitizerDebugLocation SanScope(this, {Kind}, CheckHandler);
2150  llvm::Value *Check;
// getRangeForType yields a half-open [Min, End); turn End into an inclusive
// upper bound for the comparisons below.
2151  --End;
2152  if (!Min) {
// Non-negative ranges need only an unsigned upper-bound compare.
2153    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
2154  } else {
2155    llvm::Value *Upper =
2156        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
2157    llvm::Value *Lower =
2158        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
2159    Check = Builder.CreateAnd(Upper, Lower);
2160  }
2161  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
2163  EmitCheck(std::make_pair(Check, Kind), CheckHandler, StaticArgs, Value);
2164  return true;
2165}
2166
// Main scalar-load entry point: load a value of type \p Ty from \p Addr,
// handling TLS address adjustment, packed boolean vectors, ABI-widened
// vectors, atomics, nontemporal hints, TBAA and range metadata, then convert
// the raw memory value to its primary IR form via EmitFromMemory.
// NOTE(review): the first signature line (source line 2167) and the second
// withPointer argument (2176) are elided in this listing.
2168                                               QualType Ty,
2169                                               SourceLocation Loc,
2170                                               LValueBaseInfo BaseInfo,
2171                                               TBAAAccessInfo TBAAInfo,
2172                                               bool isNontemporal) {
// Thread-local globals must be addressed through the TLS intrinsic.
2173  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2174    if (GV->isThreadLocal())
2175      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2177
2178  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2179    // Boolean vectors use `iN` as storage type.
2180    if (ClangVecTy->isPackedVectorBoolType(getContext())) {
2181      llvm::Type *ValTy = ConvertType(Ty);
2182      unsigned ValNumElems =
2183          cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2184      // Load the `iP` storage object (P is the padded vector size).
2185      auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
2186      const auto *RawIntTy = RawIntV->getType();
2187      assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
2188      // Bitcast iP --> <P x i1>.
2189      auto *PaddedVecTy = llvm::FixedVectorType::get(
2190          Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2191      llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
2192      // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2193      V = emitBoolVecConversion(V, ValNumElems, "extractvec");
2194
2195      return EmitFromMemory(V, Ty);
2196    }
2197
2198    // Handles vectors of sizes that are likely to be expanded to a larger size
2199    // to optimize performance.
2200    auto *VTy = cast<llvm::FixedVectorType>(Addr.getElementType());
2201    auto *NewVecTy =
2202        CGM.getABIInfo().getOptimalVectorMemoryType(VTy, getLangOpts());
2203
// Load the widened memory type, then shuffle back down to the logical
// element count.
2204    if (VTy != NewVecTy) {
2205      Address Cast = Addr.withElementType(NewVecTy);
2206      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVecN");
2207      unsigned OldNumElements = VTy->getNumElements();
2208      SmallVector<int, 16> Mask(OldNumElements);
2209      std::iota(Mask.begin(), Mask.end(), 0);
2210      V = Builder.CreateShuffleVector(V, Mask, "extractVec");
2211      return EmitFromMemory(V, Ty);
2212    }
2213  }
2214
2215  // Atomic operations have to be done on integral types.
2216  LValue AtomicLValue =
2217      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2218  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
2219    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
2220  }
2221
2222  Addr =
2223      Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));
2224
2225  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
2226  if (isNontemporal) {
2227    llvm::MDNode *Node = llvm::MDNode::get(
2228        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2229    Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2230  }
2231
2232  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2233
2234  maybeAttachRangeForLoad(Load, Ty, Loc);
2235
2236  return EmitFromMemory(Load, Ty);
2237}
2238
2239/// Converts a scalar value from its primary IR type (as returned
2240/// by ConvertType) to its load/store type (as returned by
2241/// convertTypeForLoadStore).
// NOTE(review): source line 2266 (which presumably computes the `Signed`
// flag used by the CreateIntCast below — confirm against upstream) is elided
// in this listing.
2242llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
// Atomics are stored as their underlying value type.
2243  if (auto *AtomicTy = Ty->getAs<AtomicType>())
2244    Ty = AtomicTy->getValueType();
2245
2246  if (Ty->isExtVectorBoolType() || Ty->isConstantMatrixBoolType()) {
2247    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2248
2249    if (Value->getType() == StoreTy)
2250      return Value;
2251
// Non-packed case: widen each lane to the memory scalar width.
2252    if (StoreTy->isVectorTy() && StoreTy->getScalarSizeInBits() >
2253                                     Value->getType()->getScalarSizeInBits())
2254      return Builder.CreateZExt(Value, StoreTy);
2255
2256    // Expand to the memory bit width.
2257    unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2258    // <N x i1> --> <P x i1>.
2259    Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2260    // <P x i1> --> iP.
2261    Value = Builder.CreateBitCast(Value, StoreTy);
2262  }
2263
// Bools and _BitInts are widened/narrowed to their memory representation.
2264  if (Ty->hasBooleanRepresentation() || Ty->isBitIntType()) {
2265    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2267    return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2268  }
2269
2270  return Value;
2271}
2272
2273/// Converts a scalar value from its load/store type (as returned
2274/// by convertTypeForLoadStore) to its primary IR type (as returned
2275/// by ConvertType).
// NOTE(review): source line 2280 (the condition opening the packed
// bool-vector branch below) is elided in this listing.
2276llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
// Atomics are loaded as their underlying value type.
2277  if (auto *AtomicTy = Ty->getAs<AtomicType>())
2278    Ty = AtomicTy->getValueType();
2279
2281    const auto *RawIntTy = Value->getType();
2282
2283    // Bitcast iP --> <P x i1>.
2284    auto *PaddedVecTy = llvm::FixedVectorType::get(
2285        Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2286    auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2287    // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2288    llvm::Type *ValTy = ConvertType(Ty);
2289    unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2290    return emitBoolVecConversion(V, ValNumElems, "extractvec");
2291  }
2292
2293  llvm::Type *ResTy = ConvertType(Ty);
2294  bool HasBoolRep = Ty->hasBooleanRepresentation() || Ty->isExtVectorBoolType();
// Under the cmp0 conversion mode, normalize a loaded bool with `!= 0`
// rather than truncation.
2295  if (HasBoolRep && CGM.getCodeGenOpts().isConvertingBoolWithCmp0()) {
2296    return Builder.CreateICmpNE(
2297        Value, llvm::Constant::getNullValue(Value->getType()), "loadedv");
2298  }
2299  if (HasBoolRep || Ty->isBitIntType())
2300    return Builder.CreateTrunc(Value, ResTy, "loadedv");
2301
2302  return Value;
2303}
2304
2305// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2306// MatrixType), if it points to an array (the memory type of MatrixType).
// The reverse (vector -> array) conversion is applied when IsVector is false.
// NOTE(review): the first signature line (source line 2307) is elided in
// this listing.
2308                                         CodeGenFunction &CGF,
2309                                         bool IsVector = true) {
2310  auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2311  if (ArrayTy && IsVector) {
2312    auto ArrayElements = ArrayTy->getNumElements();
2313    auto *ArrayElementTy = ArrayTy->getElementType();
// In HLSL the array's elements are themselves vectors; flatten them into
// one wide vector.
2314    if (CGF.getContext().getLangOpts().HLSL) {
2315      auto *VectorTy = cast<llvm::FixedVectorType>(ArrayElementTy);
2316      ArrayElementTy = VectorTy->getElementType();
2317      ArrayElements *= VectorTy->getNumElements();
2318    }
2319    auto *VectorTy = llvm::FixedVectorType::get(ArrayElementTy, ArrayElements);
2320
2321    return Addr.withElementType(VectorTy);
2322  }
2323  auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2324  if (VectorTy && !IsVector) {
2325    auto *ArrayTy = llvm::ArrayType::get(
2326        VectorTy->getElementType(),
2327        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2328
2329    return Addr.withElementType(ArrayTy);
2330  }
2331
// Already in the requested form; return unchanged.
2332  return Addr;
2333}
2334
// Emit an lvalue for a matrix element-access/swizzle expression: the base is
// emitted as an lvalue (or, for rvalue matrices, spilled into a memory
// temporary), the encoded row-major element indices are re-linearized for a
// column-major default layout, and the result is built as an ext-vector-
// element style lvalue over the matrix storage.
// NOTE(review): the signature (source line 2335) and several interior lines
// (2349, 2356, 2363, 2379, 2385, 2392, 2398) are elided in this listing; the
// remaining code is preserved verbatim.
2336  LValue Base;
2337  if (E->getBase()->isGLValue())
2338    Base = EmitLValue(E->getBase());
2339  else {
2340    assert(E->getBase()->getType()->isConstantMatrixType() &&
2341           "Result must be a Constant Matrix");
// Rvalue base: evaluate it and spill to a temporary so we have an address.
2342    llvm::Value *Mat = EmitScalarExpr(E->getBase());
2343    Address MatMem = CreateMemTemp(E->getBase()->getType());
2344    QualType Ty = E->getBase()->getType();
2345    llvm::Type *LTy = convertTypeForLoadStore(Ty, Mat->getType());
2346    if (LTy->getScalarSizeInBits() > Mat->getType()->getScalarSizeInBits())
2347      Mat = Builder.CreateZExt(Mat, LTy);
2348    Builder.CreateStore(Mat, MatMem);
2350  }
2351  QualType ResultType =
2352      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
2353
2354  // Encode the element access list into a vector of unsigned indices.
2355  // getEncodedElementAccess returns row-major linearized indices.
2357  E->getEncodedElementAccess(Indices);
2358
2359  // getEncodedElementAccess returns row-major linearized indices
2360  // If the matrix memory layout is column-major, convert indices
2361  // to column-major indices.
2362  bool IsColMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
2364  if (IsColMajor) {
2365    const auto *MT = E->getBase()->getType()->castAs<ConstantMatrixType>();
2366    unsigned NumCols = MT->getNumColumns();
2367    for (uint32_t &Idx : Indices) {
2368      // Decompose row-major index: Row = Idx / NumCols, Col = Idx % NumCols
2369      unsigned Row = Idx / NumCols;
2370      unsigned Col = Idx % NumCols;
2371      // Re-linearize as column-major
2372      Idx = MT->getColumnMajorFlattenedIndex(Row, Col);
2373    }
2374  }
2375
2376  if (Base.isSimple()) {
2377    RawAddress MatAddr = Base.getAddress();
// HLSL buffer-resident matrices may be padded; copy into a temp first.
2378    if (getLangOpts().HLSL &&
2380      MatAddr = CGM.getHLSLRuntime().createBufferMatrixTempAddress(
2381          Base, E->getExprLoc(), *this);
2382
2383    llvm::Constant *CV =
2384        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
2386                                     CV, ResultType, Base.getBaseInfo(),
2387                                     TBAAAccessInfo());
2388  }
2389  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
2390
// Swizzle-of-swizzle: compose the new indices through the base's elements.
2391  llvm::Constant *BaseElts = Base.getExtVectorElts();
2393
2394  for (unsigned Index : Indices)
2395    CElts.push_back(BaseElts->getAggregateElement(Index));
2396  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
2397
2399      MaybeConvertMatrixAddress(Base.getExtVectorAddress(), *this), CV,
2400      ResultType, Base.getBaseInfo(), TBAAAccessInfo());
2401}
2402
2403// Emit a store of a matrix LValue. This may require casting the original
2404// pointer to memory address (ArrayType) to a pointer to the value type
2405// (VectorType).
2406static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2407 bool isInit, CodeGenFunction &CGF) {
2408 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2409 value->getType()->isVectorTy());
2410 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2411 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2412 lvalue.isNontemporal());
2413}
2414
// Main scalar-store entry point: convert \p Value to its memory
// representation and store it to \p Addr, handling TLS address adjustment,
// ABI-widened vectors (padding with undef lanes), atomics, nontemporal
// hints, and TBAA decoration.
// NOTE(review): the first signature line (source line 2415), the second
// withPointer argument (2423) and source line 2459 are elided in this
// listing; the remaining code is preserved verbatim.
2416                                        bool Volatile, QualType Ty,
2417                                        LValueBaseInfo BaseInfo,
2418                                        TBAAAccessInfo TBAAInfo,
2419                                        bool isInit, bool isNontemporal) {
// Thread-local globals must be addressed through the TLS intrinsic.
2420  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2421    if (GV->isThreadLocal())
2422      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2424
2425  // Handles vectors of sizes that are likely to be expanded to a larger size
2426  // to optimize performance.
2427  llvm::Type *SrcTy = Value->getType();
2428  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2429    if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2430      auto *NewVecTy =
2431          CGM.getABIInfo().getOptimalVectorMemoryType(VecTy, getLangOpts());
2432      if (!ClangVecTy->isPackedVectorBoolType(getContext()) &&
2433          VecTy != NewVecTy) {
2434        SmallVector<int, 16> Mask(NewVecTy->getNumElements(),
2435                                  VecTy->getNumElements());
2436        std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0);
2437        // Use undef instead of poison for the padding lanes, to make sure no
2438        // padding bits are poisoned, which may break coercion.
2439        Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
2440                                            Mask, "extractVec");
2441        SrcTy = NewVecTy;
2442      }
2443      if (Addr.getElementType() != SrcTy)
2444        Addr = Addr.withElementType(SrcTy);
2445    }
2446  }
2447
2448  Value = EmitToMemory(Value, Ty);
2449
2450  LValue AtomicLValue =
2451      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
// Atomic types always take the atomic path; other suitable lvalues do too,
// except during initialization (no concurrent readers yet).
2452  if (Ty->isAtomicType() ||
2453      (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2454    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2455    return;
2456  }
2457
2458  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2460
2461  if (isNontemporal) {
2462    llvm::MDNode *Node =
2463        llvm::MDNode::get(Store->getContext(),
2464                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2465    Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2466  }
2467
2468  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2469}
2470
2471void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2472 bool isInit) {
2473 if (lvalue.getType()->isConstantMatrixType()) {
2474 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2475 return;
2476 }
2477
2478 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2479 lvalue.getType(), lvalue.getBaseInfo(),
2480 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2481}
2482
2483// Emit a load of a LValue of matrix type. This may require casting the pointer
2484// to memory address (ArrayType) to a pointer to the value type (VectorType).
// NOTE(review): the first signature line (source line 2485) and the RHS of
// the HLSL DestAddr assignment (line 2495) are elided in this listing.
2486                                   CodeGenFunction &CGF) {
2487  assert(LV.getType()->isConstantMatrixType());
2488  RawAddress DestAddr = LV.getAddress();
2489
2490  // HLSL constant buffers may pad matrix layouts, so copy elements into a
2491  // non-padded local alloca before loading.
2492  if (CGF.getLangOpts().HLSL &&
2493      LV.getType().getAddressSpace() == LangAS::hlsl_constant)
2494    DestAddr =
2496
// Re-type the (possibly array-typed) storage as a vector before loading.
2497  Address Addr = MaybeConvertMatrixAddress(DestAddr, CGF);
2498  LV.setAddress(Addr);
2499  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2500}
2501
// Load an rvalue of any evaluation kind from \p LV: scalars go through
// EmitLoadOfLValue, complex values through EmitLoadOfComplex, and aggregates
// are copied into the provided slot.
// NOTE(review): the first signature line (source line 2502) is elided.
2503                                          SourceLocation Loc) {
2504  QualType Ty = LV.getType();
2505  switch (getEvaluationKind(Ty)) {
2506  case TEK_Scalar:
2507    return EmitLoadOfLValue(LV, Loc);
2508  case TEK_Complex:
2509    return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
2510  case TEK_Aggregate:
2511    EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2512    return Slot.asRValue();
2513  }
2514  llvm_unreachable("bad evaluation kind");
2515}
2516
2517/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2518/// method emits the address of the lvalue, then loads the result as an rvalue,
2519/// returning the rvalue.
// Dispatches on the lvalue's flavor: __ptrauth-qualified, ObjC __weak,
// simple (scalar/matrix), vector element, ext-vector subset, global
// register, matrix element, matrix row, and bitfield.
// NOTE(review): the signature (source line 2520) and several interior lines
// (2523, 2536, 2539, 2544, 2569, 2620) are elided in this listing; the
// remaining code is preserved verbatim.
2521  // Load from __ptrauth.
2522  if (PointerAuthQualifier PtrAuth = LV.getQuals().getPointerAuth()) {
// Recurse with the qualifier stripped, then strip the signature from the
// loaded pointer value.
2524    llvm::Value *Value = EmitLoadOfLValue(LV, Loc).getScalarVal();
2525    return RValue::get(EmitPointerAuthUnqualify(PtrAuth, Value, LV.getType(),
2526                                                LV.getAddress(),
2527                                                /*known nonnull*/ false));
2528  }
2529
2530  if (LV.isObjCWeak()) {
2531    // load of a __weak object.
2532    Address AddrWeakObj = LV.getAddress();
2533    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
2534                                                             AddrWeakObj));
2535  }
2537    // In MRC mode, we do a load+autorelease.
2538    if (!getLangOpts().ObjCAutoRefCount) {
2540    }
2541
2542    // In ARC mode, we load retained and then consume the value.
2543    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2545    return RValue::get(Object);
2546  }
2547
2548  if (LV.isSimple()) {
2549    assert(!LV.getType()->isFunctionType());
2550
2551    if (LV.getType()->isConstantMatrixType())
2552      return EmitLoadOfMatrixLValue(LV, Loc, *this);
2553
2554    // Everything needs a load.
2555    return RValue::get(EmitLoadOfScalar(LV, Loc));
2556  }
2557
2558  if (LV.isVectorElt()) {
2559    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2560                                              LV.isVolatileQualified());
2561    llvm::Value *Elt =
2562        Builder.CreateExtractElement(Load, LV.getVectorIdx(), "vecext");
2563    return RValue::get(EmitFromMemory(Elt, LV.getType()));
2564  }
2565
2566  // If this is a reference to a subset of the elements of a vector, either
2567  // shuffle the input or extract/insert them as appropriate.
2568  if (LV.isExtVectorElt()) {
2570  }
2571
2572  // Global Register variables always invoke intrinsics
2573  if (LV.isGlobalReg())
2574    return EmitLoadOfGlobalRegLValue(LV);
2575
2576  if (LV.isMatrixElt()) {
2577    llvm::Value *Idx = LV.getMatrixIdx();
2578    QualType EltTy = LV.getType();
2579    if (const auto *MatTy = EltTy->getAs<ConstantMatrixType>()) {
2580      EltTy = MatTy->getElementType();
// Help the optimizer: assume the dynamic index is within bounds.
2581      if (CGM.getCodeGenOpts().isOptimizedBuild()) {
2582        llvm::MatrixBuilder MB(Builder);
2583        MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2584      }
2585    }
2586    llvm::LoadInst *Load =
2587        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
2588    llvm::Value *Elt = Builder.CreateExtractElement(Load, Idx, "matrixext");
2589    return RValue::get(EmitFromMemory(Elt, EltTy));
2590  }
2591  if (LV.isMatrixRow()) {
2592    QualType MatTy = LV.getType();
2593    const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();
2594
2595    unsigned NumRows = MT->getNumRows();
2596    unsigned NumCols = MT->getNumColumns();
2597    unsigned NumLanes = NumCols;
2598    llvm::Value *MatrixVec = EmitLoadOfScalar(LV, Loc);
2599    llvm::Value *Row = LV.getMatrixRowIdx();
2600    llvm::Type *ElemTy = ConvertType(MT->getElementType());
2601    llvm::Constant *ColConstsIndices = nullptr;
2602    llvm::MatrixBuilder MB(Builder);
2603
// A row swizzle selects specific columns; the lane count comes from the
// swizzle's index vector rather than the full column count.
2604    if (LV.isMatrixRowSwizzle()) {
2605      ColConstsIndices = LV.getMatrixRowElts();
2606      NumLanes = llvm::cast<llvm::FixedVectorType>(ColConstsIndices->getType())
2607                     ->getNumElements();
2608    }
2609
2610    llvm::Type *RowTy = llvm::FixedVectorType::get(ElemTy, NumLanes);
2611    llvm::Value *Result = llvm::PoisonValue::get(RowTy); // <NumLanes x T>
2612
// Gather each requested column element of the row into the result vector.
2613    for (unsigned Col = 0; Col < NumLanes; ++Col) {
2614      llvm::Value *ColIdx;
2615      if (ColConstsIndices)
2616        ColIdx = ColConstsIndices->getAggregateElement(Col);
2617      else
2618        ColIdx = llvm::ConstantInt::get(Row->getType(), Col);
2619      bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
2621      llvm::Value *EltIndex =
2622          MB.CreateIndex(Row, ColIdx, NumRows, NumCols, IsMatrixRowMajor);
2623      llvm::Value *Elt = Builder.CreateExtractElement(MatrixVec, EltIndex);
2624      llvm::Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
2625      Result = Builder.CreateInsertElement(Result, Elt, Lane);
2626    }
2627
2628    return RValue::get(Result);
2629  }
2630
2631  assert(LV.isBitField() && "Unknown LValue type!");
2632  return EmitLoadOfBitfieldLValue(LV, Loc);
2633}
2634
// Load a bitfield lvalue: load the storage unit, then shift/mask the field
// into place (sign-extending via shl+ashr for signed fields), honoring the
// AAPCS volatile-bitfield layout when applicable, and finally run the UBSan
// range check on the extracted value.
// NOTE(review): the first signature line (source line 2635) is elided.
2636                                                 SourceLocation Loc) {
2637  const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2638
2639  // Get the output type.
2640  llvm::Type *ResLTy = ConvertType(LV.getType());
2641
2642  Address Ptr = LV.getBitFieldAddress();
2643  llvm::Value *Val =
2644      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2645
// AAPCS prescribes a distinct storage unit for volatile bitfield accesses.
2646  bool UseVolatile = LV.isVolatileQualified() &&
2647                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2648  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2649  const unsigned StorageSize =
2650      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2651  if (Info.IsSigned) {
2652    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
// Shift the field to the top, then arithmetic-shift down to sign-extend.
2653    unsigned HighBits = StorageSize - Offset - Info.Size;
2654    if (HighBits)
2655      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2656    if (Offset + HighBits)
2657      Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2658  } else {
2659    if (Offset)
2660      Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
// Mask off bits above the field unless it fills the storage unit.
2661    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2662      Val = Builder.CreateAnd(
2663          Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2664  }
2665  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2666  EmitScalarRangeCheck(Val, LV.getType(), Loc);
2667  return RValue::get(Val);
2668}
2669
2670// If this is a reference to a subset of the elements of a vector, create an
2671// appropriate shufflevector.
// NOTE(review): the signature (source line 2672) and the declaration of the
// Mask vector (line 2712) are elided in this listing.
2673  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2674                                        LV.isVolatileQualified());
2675
2676  // HLSL allows treating scalars as one-element vectors. Converting the scalar
2677  // IR value to a vector here allows the rest of codegen to behave as normal.
2678  if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2679    llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2680    llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2681    Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2682  }
2683
2684  const llvm::Constant *Elts = LV.getExtVectorElts();
2685
2686  // If the result of the expression is a non-vector type, we must be extracting
2687  // a single element. Just codegen as an extractelement.
2688  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2689  if (!ExprVT) {
2690    unsigned InIdx = getAccessedFieldNo(0, Elts);
2691    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2692
2693    llvm::Value *Element = Builder.CreateExtractElement(Vec, Elt);
2694
// The in-memory element may be wider than the result type (e.g. bools);
// narrow by compare-against-zero or truncation, per the codegen option.
2695    llvm::Type *LVTy = ConvertType(LV.getType());
2696    if (Element->getType()->getPrimitiveSizeInBits() >
2697        LVTy->getPrimitiveSizeInBits()) {
2698      if (LV.getType()->hasBooleanRepresentation() &&
2699          CGM.getCodeGenOpts().isConvertingBoolWithCmp0())
2700        Element = Builder.CreateICmpNE(
2701            Element, llvm::Constant::getNullValue(Element->getType()));
2702      else
2703        Element = Builder.CreateTrunc(Element, LVTy);
2704    }
2705
2706    return RValue::get(Element);
2707  }
2708
2709  // Always use shuffle vector to try to retain the original program structure
2710  unsigned NumResultElts = ExprVT->getNumElements();
2711
2713  for (unsigned i = 0; i != NumResultElts; ++i)
2714    Mask.push_back(getAccessedFieldNo(i, Elts));
2715
2716  Vec = Builder.CreateShuffleVector(Vec, Mask);
2717
2718  if (LV.getType()->isExtVectorBoolType()) {
2719    if (CGM.getCodeGenOpts().isConvertingBoolWithCmp0())
2720      Vec = Builder.CreateICmpNE(Vec,
2721                                 llvm::Constant::getNullValue(Vec->getType()));
2722    else
2723      Vec = Builder.CreateTrunc(Vec, ConvertType(LV.getType()), "truncv");
2724  }
2725
2726  return RValue::get(Vec);
2727}
2728
2729/// Generates lvalue for partial ext_vector access.
// Computes the address of the first accessed element: re-type the vector
// address to its element type, then GEP to the first selected field.
// NOTE(review): the signature (source line 2730) is elided in this listing.
2731  Address VectorAddress = LV.getExtVectorAddress();
2732  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2733  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2734
2735  Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2736
2737  const llvm::Constant *Elts = LV.getExtVectorElts();
2738  unsigned ix = getAccessedFieldNo(0, Elts);
2739
2740  Address VectorBasePtrPlusIx =
2741      Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2742                                     "vector.elt");
2743
2744  return VectorBasePtrPlusIx;
2745}
2746
2747/// Load of global named registers are always calls to intrinsics.
// Emits a call to llvm.read_register with the register-name metadata; pointer
// results are read as the target's intptr type and converted back with
// inttoptr.
// NOTE(review): the signature (source line 2748) is elided in this listing.
2749  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2750         "Bad type for register variable");
2751  llvm::MDNode *RegName = cast<llvm::MDNode>(
2752      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2753
2754  // We accept integer and pointer types only
2755  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2756  llvm::Type *Ty = OrigTy;
// read_register cannot produce a pointer directly; use the intptr type.
2757  if (OrigTy->isPointerTy())
2758    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2759  llvm::Type *Types[] = { Ty };
2760
2761  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2762  llvm::Value *Call = Builder.CreateCall(
2763      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2764  if (OrigTy->isPointerTy())
2765    Call = Builder.CreateIntToPtr(Call, OrigTy);
2766  return RValue::get(Call);
2767}
2768
2769/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2770/// lvalue, where both are guaranteed to the have the same type, and that type
2771/// is 'Ty'.
// NOTE(review): this listing came from a rendered view; the signature line
// (original line 2772) and a few interior lines are missing — gaps are marked
// below where statements appear truncated.
2773                                             bool isInit) {
// Non-simple destinations (vector/matrix elements, ext-vector components,
// bitfields, global registers) cannot be written with a plain store; each
// gets its own store path below.
2774  if (!Dst.isSimple()) {
2775    if (Dst.isVectorElt()) {
2776      if (getLangOpts().HLSL) {
2777        // HLSL allows direct access to vector elements, so storing to
2778        // individual elements of a vector through VectorElt is handled as
2779        // separate store instructions.
2780        Address DstAddr = Dst.getVectorAddress();
2781        llvm::Type *DestAddrTy = DstAddr.getElementType();
2782        llvm::Type *ElemTy = DestAddrTy->getScalarType();
// NOTE(review): the line declaring ElemAlign (original line 2783) is missing
// from this listing.
2784            CGM.getDataLayout().getPrefTypeAlign(ElemTy));
2785
2786        assert(ElemTy->getScalarSizeInBits() >= 8 &&
2787               "vector element type must be at least byte-sized");
2788
// Widen sub-byte scalars (e.g. i1 bools) up to the in-memory element type
// before storing.
2789        llvm::Value *Val = Src.getScalarVal();
2790        if (Val->getType()->getPrimitiveSizeInBits() <
2791            ElemTy->getScalarSizeInBits())
2792          Val = Builder.CreateZExt(Val, ElemTy->getScalarType());
2793
2794        llvm::Value *Idx = Dst.getVectorIdx();
2795        llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2796        Address DstElemAddr =
2797            Builder.CreateGEP(DstAddr, {Zero, Idx}, DestAddrTy, ElemAlign);
2798        Builder.CreateStore(Val, DstElemAddr, Dst.isVolatileQualified());
2799        return;
2800      }
2801
2802      // Read/modify/write the vector, inserting the new element.
2803      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2804                                            Dst.isVolatileQualified());
2805      llvm::Type *VecTy = Vec->getType();
2806      llvm::Value *SrcVal = Src.getScalarVal();
2807
// As above: widen a source narrower than the stored element type.
2808      if (VecTy->isVectorTy() && SrcVal->getType()->getPrimitiveSizeInBits() <
2809                                     VecTy->getScalarSizeInBits())
2810        SrcVal = Builder.CreateZExt(SrcVal, VecTy->getScalarType());
2811
// Boolean vectors may be stored as a single iN integer; temporarily view the
// loaded value as <N x i1> so insertelement works, then cast back below.
2812      auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2813      if (IRStoreTy) {
2814        auto *IRVecTy = llvm::FixedVectorType::get(
2815            Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2816        Vec = Builder.CreateBitCast(Vec, IRVecTy);
2817        // iN --> <N x i1>.
2818      }
2819
2820      // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar
2821      // types which are mapped to vector LLVM IR types (e.g. for implementing
2822      // an ABI).
2823      if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(SrcVal->getType());
2824          EltTy && EltTy->getNumElements() == 1)
2825        SrcVal = Builder.CreateBitCast(SrcVal, EltTy->getElementType());
2826
2827      Vec = Builder.CreateInsertElement(Vec, SrcVal, Dst.getVectorIdx(),
2828                                        "vecins");
2829      if (IRStoreTy) {
2830        // <N x i1> --> <iN>.
2831        Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2832      }
2833
2834      auto *I = Builder.CreateStore(Vec, Dst.getVectorAddress(),
2835                                    Dst.isVolatileQualified());
// NOTE(review): a line using `I` (original line 2836, presumably the
// addInstToCurrentSourceAtom call seen in sibling paths) is missing here.
2837      return;
2838    }
2839
2840    // If this is an update of extended vector elements, insert them as
2841    // appropriate.
2842    if (Dst.isExtVectorElt())
// NOTE(review): the call emitting the ext-vector store (original line 2843)
// is missing from this listing.
2844
2845    if (Dst.isGlobalReg())
2846      return EmitStoreThroughGlobalRegLValue(Src, Dst);
2847
2848    if (Dst.isMatrixElt()) {
2849      if (getLangOpts().HLSL) {
2850        // HLSL allows direct access to matrix elements, so storing to
2851        // individual elements of a matrix through MatrixElt is handled as
2852        // separate store instructions.
2853        Address DstAddr = Dst.getMatrixAddress();
2854        llvm::Type *DestAddrTy = DstAddr.getElementType();
2855        llvm::Type *ElemTy = DestAddrTy->getScalarType();
// NOTE(review): the line declaring ElemAlign (original line 2856) is missing
// from this listing.
2857            CGM.getDataLayout().getPrefTypeAlign(ElemTy));
2858
2859        assert(ElemTy->getScalarSizeInBits() >= 8 &&
2860               "matrix element type must be at least byte-sized");
2861
2862        llvm::Value *Val = Src.getScalarVal();
2863        if (Val->getType()->getPrimitiveSizeInBits() <
2864            ElemTy->getScalarSizeInBits())
2865          Val = Builder.CreateZExt(Val, ElemTy->getScalarType());
2866
2867        llvm::Value *Idx = Dst.getMatrixIdx();
2868        llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2869        Address DstElemAddr =
2870            Builder.CreateGEP(DstAddr, {Zero, Idx}, DestAddrTy, ElemAlign);
2871        Builder.CreateStore(Val, DstElemAddr, Dst.isVolatileQualified());
2872        return;
2873      }
2874
// Non-HLSL path: read/modify/write the whole matrix value. In optimized
// builds, tell the optimizer the index is in range via an assumption.
2875      llvm::Value *Idx = Dst.getMatrixIdx();
2876      if (CGM.getCodeGenOpts().isOptimizedBuild()) {
2877        const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2878        llvm::MatrixBuilder MB(Builder);
2879        MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2880      }
2881      llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2882      llvm::Value *InsertVal = Src.getScalarVal();
2883      llvm::Value *Vec =
2884          Builder.CreateInsertElement(Load, InsertVal, Idx, "matins");
2885      auto *I = Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2886                                    Dst.isVolatileQualified());
// NOTE(review): a line using `I` (original line 2887) is missing here.
2888      return;
2889    }
2890    if (Dst.isMatrixRow()) {
2891      // NOTE: Since there are no other languages that implement matrix single
2892      // subscripting, the logic here is specific to HLSL which allows
2893      // per-element stores to rows of matrices.
2894      assert(getLangOpts().HLSL &&
2895             "Store through matrix row LValues is only implemented for HLSL!");
2896      QualType MatTy = Dst.getType();
2897      const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();
2898
2899      unsigned NumRows = MT->getNumRows();
2900      unsigned NumCols = MT->getNumColumns();
2901      unsigned NumLanes = NumCols;
2902
2903      Address DstAddr = Dst.getMatrixAddress();
2904      llvm::Type *DestAddrTy = DstAddr.getElementType();
2905      llvm::Type *ElemTy = DestAddrTy->getScalarType();
2906      CharUnits ElemAlign =
2907          CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(ElemTy));
2908
2909      assert(ElemTy->getScalarSizeInBits() >= 8 &&
2910             "matrix element type must be at least byte-sized");
2911
// If the whole source row is narrower than the storage element type (e.g. a
// bool row), widen it lane-wise before extracting elements.
2912      llvm::Value *RowVal = Src.getScalarVal();
2913      if (RowVal->getType()->getScalarType()->getPrimitiveSizeInBits() <
2914          ElemTy->getScalarSizeInBits()) {
2915        auto *RowValVecTy = cast<llvm::FixedVectorType>(RowVal->getType());
2916        llvm::Type *StorageElmTy = llvm::FixedVectorType::get(
2917            ElemTy->getScalarType(), RowValVecTy->getNumElements());
2918        RowVal = Builder.CreateZExt(RowVal, StorageElmTy);
2919      }
2920
2921      llvm::MatrixBuilder MB(Builder);
2922
// A row swizzle narrows the set of columns written; NumLanes then comes from
// the swizzle's constant index vector instead of the full column count.
2923      llvm::Constant *ColConstsIndices = nullptr;
2924      if (Dst.isMatrixRowSwizzle()) {
2925        ColConstsIndices = Dst.getMatrixRowElts();
2926        NumLanes =
2927            llvm::cast<llvm::FixedVectorType>(ColConstsIndices->getType())
2928                ->getNumElements();
2929      }
2930
// Store each lane of the row as an individual element store.
2931      llvm::Value *Row = Dst.getMatrixRowIdx();
2932      for (unsigned Col = 0; Col < NumLanes; ++Col) {
2933        llvm::Value *ColIdx;
2934        if (ColConstsIndices)
2935          ColIdx = ColConstsIndices->getAggregateElement(Col);
2936        else
2937          ColIdx = llvm::ConstantInt::get(Row->getType(), Col);
2938        bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
// NOTE(review): the enumerator compared against here (original line 2939) is
// missing from this listing.
2940        llvm::Value *EltIndex =
2941            MB.CreateIndex(Row, ColIdx, NumRows, NumCols, IsMatrixRowMajor);
2942        llvm::Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
2943        llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2944        llvm::Value *NewElt = Builder.CreateExtractElement(RowVal, Lane);
2945        Address DstElemAddr =
2946            Builder.CreateGEP(DstAddr, {Zero, EltIndex}, DestAddrTy, ElemAlign);
2947        Builder.CreateStore(NewElt, DstElemAddr, Dst.isVolatileQualified());
2948      }
2949
2950      return;
2951    }
2952
2953    assert(Dst.isBitField() && "Unknown LValue type");
2954    return EmitStoreThroughBitfieldLValue(Src, Dst);
2955  }
2956
2957  // Handle __ptrauth qualification by re-signing the value.
2958  if (PointerAuthQualifier PointerAuth = Dst.getQuals().getPointerAuth()) {
2959    Src = RValue::get(EmitPointerAuthQualify(PointerAuth, Src.getScalarVal(),
2960                                             Dst.getType(), Dst.getAddress(),
2961                                             /*known nonnull*/ false));
2962  }
2963
2964  // There's special magic for assigning into an ARC-qualified l-value.
2965  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
// NOTE(review): the `case Qualifiers::OCL_*` labels of this switch (original
// lines 2967, 2970, 2974, 2982, 2991-2992) are missing from this listing;
// only the case bodies are visible below.
2966    switch (Lifetime) {
2968      llvm_unreachable("present but none");
2969
2971      // nothing special
2972      break;
2973
2975      if (isInit) {
2976        Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2977        break;
2978      }
2979      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2980      return;
2981
2983      if (isInit)
2984        // Initialize and then skip the primitive store.
2986      else
2988                          /*ignore*/ true);
2989      return;
2990
2993                                     Src.getScalarVal()));
2994      // fall into the normal path
2995      break;
2996    }
2997  }
2998
2999  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
3000    // load of a __weak object.
3001    Address LvalueDst = Dst.getAddress();
3002    llvm::Value *src = Src.getScalarVal();
3003    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
3004    return;
3005  }
3006
3007  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
3008    // load of a __strong object.
3009    Address LvalueDst = Dst.getAddress();
3010    llvm::Value *src = Src.getScalarVal();
3011    if (Dst.isObjCIvar()) {
3012      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
3013      llvm::Type *ResultType = IntPtrTy;
// NOTE(review): the line materializing `dst` (original line 3014) is missing
// from this listing.
// Compute the ivar's byte offset from the object base (LHS - RHS as
// integers); the GC runtime needs it for the ivar write barrier.
3015      llvm::Value *RHS = dst.emitRawPointer(*this);
3016      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
3017      llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
3018                                                ResultType, "sub.ptr.lhs.cast");
3019      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
3020      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
3021    } else if (Dst.isGlobalObjCRef()) {
3022      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
3023                                                Dst.isThreadLocalRef());
3024    }
3025    else
3026      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
3027    return;
3028  }
3029
// Simple case: a plain scalar store.
3030  assert(Src.isScalar() && "Can't emit an agg store with this method");
3031  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
3032}
3033
3035                                                     llvm::Value **Result) {
// Store `Src` into the bit-field described by `Dst`, using a read-modify-write
// of the underlying storage unit. If `Result` is non-null, it receives the
// value of the bit-field after the store (sign/zero-extended as appropriate).
// NOTE(review): the signature line (original line 3034) is missing from this
// rendered listing.
3036  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
3037  llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
3038  Address Ptr = Dst.getBitFieldAddress();
3039
3040  // Get the source value, truncated to the width of the bit-field.
3041  llvm::Value *SrcVal = Src.getScalarVal();
3042
3043  // Cast the source to the storage type and shift it into place.
3044  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
3045                                 /*isSigned=*/false);
3046  llvm::Value *MaskedVal = SrcVal;
3047
// On AAPCS targets, volatile bit-fields may use a distinct (container-sized)
// storage width/offset recorded separately in CGBitFieldInfo.
3048  const bool UseVolatile =
3049      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
3050      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
3051  const unsigned StorageSize =
3052      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
3053  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
3054  // See if there are other bits in the bitfield's storage we'll need to load
3055  // and mask together with source before storing.
3056  if (StorageSize != Info.Size) {
3057    assert(StorageSize > Info.Size && "Invalid bitfield size.");
3058    llvm::Value *Val =
3059        Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
3060
3061    // Mask the source value as needed.
3062    if (!Dst.getType()->hasBooleanRepresentation())
3063      SrcVal = Builder.CreateAnd(
3064          SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
3065          "bf.value");
3066    MaskedVal = SrcVal;
3067    if (Offset)
3068      SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
3069
3070    // Mask out the original value.
3071    Val = Builder.CreateAnd(
3072        Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
3073        "bf.clear");
3074
3075    // Or together the unchanged values and the source value.
3076    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
3077  } else {
3078    assert(Offset == 0);
3079    // According to the AACPS:
3080    // When a volatile bit-field is written, and its container does not overlap
3081    // with any non-bit-field member, its container must be read exactly once
3082    // and written exactly once using the access width appropriate to the type
3083    // of the container. The two accesses are not atomic.
3084    if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
3085        CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
3086      Builder.CreateLoad(Ptr, true, "bf.load");
3087  }
3088
3089  // Write the new value back out.
3090  auto *I = Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
3091  addInstToCurrentSourceAtom(I, SrcVal);
3092
3093  // Return the new value of the bit-field, if requested.
3094  if (Result) {
3095    llvm::Value *ResultVal = MaskedVal;
3096
3097    // Sign extend the value if needed.
3098    if (Info.IsSigned) {
3099      assert(Info.Size <= StorageSize);
3100      unsigned HighBits = StorageSize - Info.Size;
3101      if (HighBits) {
// Shift left then arithmetic-shift right to replicate the sign bit of the
// bit-field into the high bits of the storage-sized value.
3102        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
3103        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
3104      }
3105    }
3106
3107    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
3108                                      "bf.result.cast");
3109    *Result = EmitFromMemory(ResultVal, Dst.getType());
3110  }
3111}
3112
3114                                                               LValue Dst) {
// Store `Src` through an ext-vector component l-value (e.g. v.xy = ...).
// HLSL takes a per-element store path; otherwise this is a read/modify/write
// of the whole destination vector via shuffles or insertelement.
// NOTE(review): the signature line (original line 3113) is missing from this
// rendered listing.
3115  llvm::Value *SrcVal = Src.getScalarVal();
3116  Address DstAddr = Dst.getExtVectorAddress();
3117  const llvm::Constant *Elts = Dst.getExtVectorElts();
// Widen the source if its element width is narrower than the in-memory
// element width (e.g. i1 bools stored as wider integers).
3118  if (DstAddr.getElementType()->getScalarSizeInBits() >
3119      SrcVal->getType()->getScalarSizeInBits())
3120    SrcVal = Builder.CreateZExt(
3121        SrcVal, convertTypeForLoadStore(Dst.getType(), SrcVal->getType()));
3122
3123  if (getLangOpts().HLSL) {
3124    llvm::Type *DestAddrTy = DstAddr.getElementType();
3125    // HLSL allows storing to scalar values through ExtVector component LValues.
3126    // To support this we need to handle the case where the destination address
3127    // is a scalar.
3128    if (!DestAddrTy->isVectorTy()) {
3129      assert(!Dst.getType()->isVectorType() &&
3130             "this should only occur for non-vector l-values");
3131      Builder.CreateStore(SrcVal, DstAddr, Dst.isVolatileQualified());
3132      return;
3133    }
3134
3135    // HLSL allows direct access to vector elements, so storing to individual
3136    // elements of a vector through ExtVector is handled as separate store
3137    // instructions.
3138    // If we are updating multiple elements, Dst and Src are vectors; for
3139    // a single element update they are scalars.
3140    const VectorType *VTy = Dst.getType()->getAs<VectorType>();
3141    unsigned NumSrcElts = VTy ? VTy->getNumElements() : 1;
// NOTE(review): the line declaring ElemAlign (original line 3142) is missing
// from this listing.
3143        CGM.getDataLayout().getPrefTypeAlign(DestAddrTy->getScalarType()));
3144    llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
3145
// Emit one element store per accessed component; Elts maps the source lane
// to the destination field number.
3146    for (unsigned I = 0; I != NumSrcElts; ++I) {
3147      llvm::Value *Val = VTy ? Builder.CreateExtractElement(
3148                                   SrcVal, llvm::ConstantInt::get(Int32Ty, I))
3149                             : SrcVal;
3150      unsigned FieldNo = getAccessedFieldNo(I, Elts);
3151      Address DstElemAddr = Address::invalid();
3152      if (FieldNo == 0)
3153        DstElemAddr = DstAddr.withAlignment(ElemAlign);
3154      else
3155        DstElemAddr = Builder.CreateGEP(
3156            DstAddr, {Zero, llvm::ConstantInt::get(Int32Ty, FieldNo)},
3157            DestAddrTy, ElemAlign);
3158      Builder.CreateStore(Val, DstElemAddr, Dst.isVolatileQualified());
3159    }
3160    return;
3161  }
3162
3163  // This access turns into a read/modify/write of the vector. Load the input
3164  // value now.
3165  llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
3166  llvm::Type *VecTy = Vec->getType();
3167
3168  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
3169    unsigned NumSrcElts = VTy->getNumElements();
3170    unsigned NumDstElts = cast<llvm::FixedVectorType>(VecTy)->getNumElements();
3171    if (NumDstElts == NumSrcElts) {
3172      // Use shuffle vector is the src and destination are the same number of
3173      // elements and restore the vector mask since it is on the side it will be
3174      // stored.
3175      SmallVector<int, 4> Mask(NumDstElts);
3176      for (unsigned i = 0; i != NumSrcElts; ++i)
3177        Mask[getAccessedFieldNo(i, Elts)] = i;
3178
3179      Vec = Builder.CreateShuffleVector(SrcVal, Mask);
3180    } else if (NumDstElts > NumSrcElts) {
3181      // Extended the source vector to the same length and then shuffle it
3182      // into the destination.
3183      // FIXME: since we're shuffling with undef, can we just use the indices
3184      // into that? This could be simpler.
3185      SmallVector<int, 4> ExtMask;
3186      for (unsigned i = 0; i != NumSrcElts; ++i)
3187        ExtMask.push_back(i);
3188      ExtMask.resize(NumDstElts, -1);
3189      llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
3190      // build identity
// NOTE(review): the line declaring `Mask` (original line 3191) is missing
// from this listing.
3192      for (unsigned i = 0; i != NumDstElts; ++i)
3193        Mask.push_back(i);
3194
3195      // When the vector size is odd and .odd or .hi is used, the last element
3196      // of the Elts constant array will be one past the size of the vector.
3197      // Ignore the last element here, if it is greater than the mask size.
3198      if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
3199        NumSrcElts--;
3200
3201      // modify when what gets shuffled in
3202      for (unsigned i = 0; i != NumSrcElts; ++i)
3203        Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
3204      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
3205    } else {
3206      // We should never shorten the vector
3207      llvm_unreachable("unexpected shorten vector length");
3208    }
3209  } else {
3210    // If the Src is a scalar (not a vector), and the target is a vector it must
3211    // be updating one element.
3212    unsigned InIdx = getAccessedFieldNo(0, Elts);
3213    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
3214
3215    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
3216  }
3217
// Write the modified vector back as a single store.
3218  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
3219                      Dst.isVolatileQualified());
3220}
3221
3222/// Store of global named registers are always calls to intrinsics.
// NOTE(review): the signature line (original line 3223) is missing from this
// rendered listing.
3224  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
3225         "Bad type for register variable");
// The global-register l-value holds the register name as metadata; unwrap it
// to pass to the intrinsic.
3226  llvm::MDNode *RegName = cast<llvm::MDNode>(
3227      cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
3228  assert(RegName && "Register LValue is not metadata");
3229
3230  // We accept integer and pointer types only
3231  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
3232  llvm::Type *Ty = OrigTy;
// The write_register intrinsic only takes integers, so pointers are stored
// via the target's pointer-sized integer type.
3233  if (OrigTy->isPointerTy())
3234    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
3235  llvm::Type *Types[] = { Ty };
3236
3237  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
3238  llvm::Value *Value = Src.getScalarVal();
3239  if (OrigTy->isPointerTy())
3240    Value = Builder.CreatePtrToInt(Value, Ty);
3241  Builder.CreateCall(
3242      F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
3243}
3244
3245// setObjCGCLValueClass - sets class of the lvalue for the purpose of
3246// generating write-barries API. It is currently a global, ivar,
3247// or neither.
3248static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
3249 LValue &LV,
3250 bool IsMemberAccess=false) {
3251 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
3252 return;
3253
3254 if (isa<ObjCIvarRefExpr>(E)) {
3255 QualType ExpTy = E->getType();
3256 if (IsMemberAccess && ExpTy->isPointerType()) {
3257 // If ivar is a structure pointer, assigning to field of
3258 // this struct follows gcc's behavior and makes it a non-ivar
3259 // writer-barrier conservatively.
3260 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
3261 if (ExpTy->isRecordType()) {
3262 LV.setObjCIvar(false);
3263 return;
3264 }
3265 }
3266 LV.setObjCIvar(true);
3267 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
3268 LV.setBaseIvarExp(Exp->getBase());
3269 LV.setObjCArray(E->getType()->isArrayType());
3270 return;
3271 }
3272
3273 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
3274 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
3275 if (VD->hasGlobalStorage()) {
3276 LV.setGlobalObjCRef(true);
3277 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
3278 }
3279 }
3280 LV.setObjCArray(E->getType()->isArrayType());
3281 return;
3282 }
3283
3284 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
3285 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3286 return;
3287 }
3288
3289 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
3290 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3291 if (LV.isObjCIvar()) {
3292 // If cast is to a structure pointer, follow gcc's behavior and make it
3293 // a non-ivar write-barrier.
3294 QualType ExpTy = E->getType();
3295 if (ExpTy->isPointerType())
3296 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
3297 if (ExpTy->isRecordType())
3298 LV.setObjCIvar(false);
3299 }
3300 return;
3301 }
3302
3303 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
3304 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
3305 return;
3306 }
3307
3308 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
3309 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3310 return;
3311 }
3312
3313 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
3314 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3315 return;
3316 }
3317
3318 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
3319 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3320 return;
3321 }
3322
3323 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
3324 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
3325 if (LV.isObjCIvar() && !LV.isObjCArray())
3326 // Using array syntax to assigning to what an ivar points to is not
3327 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
3328 LV.setObjCIvar(false);
3329 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
3330 // Using array syntax to assigning to what global points to is not
3331 // same as assigning to the global itself. {id *G;} G[i] = 0;
3332 LV.setGlobalObjCRef(false);
3333 return;
3334 }
3335
3336 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
3337 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
3338 // We don't know if member is an 'ivar', but this flag is looked at
3339 // only in the context of LV.isObjCIvar().
3340 LV.setObjCArray(E->getType()->isArrayType());
3341 return;
3342 }
3343}
3344
3346    CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
3347    llvm::Type *RealVarTy, SourceLocation Loc) {
// Rewrite `Addr` to point at the per-thread private copy of an OpenMP
// threadprivate variable, then build an l-value from it.
// NOTE(review): the signature line (original line 3345) and the lines at
// original 3349 (OpenMPIRBuilder branch body) and 3356 (the final return)
// are missing from this rendered listing.
3348  if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
3350        CGF, VD, Addr, Loc);
3351  else
3352    Addr =
3353        CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
3354
3355  Addr = Addr.withElementType(RealVarTy);
3357}
3358
3360                                           const VarDecl *VD, QualType T) {
// For device codegen of an OpenMP `declare target` variable: if the mapping
// requires indirection, load and return the device-side address; otherwise
// return an invalid address so the caller uses the normal path.
// NOTE(review): the signature line (original 3359) and the lines at original
// 3370/3376 (the hasRequiresUnifiedSharedMemory() conditions) and 3379 (the
// line materializing `Addr`) are missing from this rendered listing.
3361  std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3362      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
3363  // Return an invalid address if variable is MT_To (or MT_Enter starting with
3364  // OpenMP 5.2, or MT_Local in OpenMP 6.0) and unified memory is not enabled.
3365  // For all other cases: MT_Link and MT_To (or MT_Enter/MT_Local) with unified
3366  // memory, return a valid address.
3367  if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3368                *Res == OMPDeclareTargetDeclAttr::MT_Enter ||
3369                *Res == OMPDeclareTargetDeclAttr::MT_Local) &&
3371    return Address::invalid();
3372  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
3373          ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3374            *Res == OMPDeclareTargetDeclAttr::MT_Enter ||
3375            *Res == OMPDeclareTargetDeclAttr::MT_Local) &&
3377         "Expected link clause OR to clause with unified memory enabled.");
3378  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
3380  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
3381}
3382
3383Address
// Load the pointee address out of a reference l-value, decorating the load
// with TBAA and (for complete pointee types) !nonnull / !align metadata.
// Optionally reports the pointee's base-info/TBAA-info through the out params.
// NOTE(review): the continuation of the signature (original line 3384) is
// missing from this rendered listing.
3385                                             LValueBaseInfo *PointeeBaseInfo,
3386                                             TBAAAccessInfo *PointeeTBAAInfo) {
3387  llvm::LoadInst *Load =
3388      Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
3389  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
3390  QualType PTy = RefLVal.getType()->getPointeeType();
3391  CharUnits Align = CGM.getNaturalTypeAlignment(
3392      PTy, PointeeBaseInfo, PointeeTBAAInfo, /*ForPointeeType=*/true);
3393  if (!PTy->isIncompleteType()) {
3394    llvm::LLVMContext &Ctx = getLLVMContext();
3395    llvm::MDBuilder MDB(Ctx);
3396    // Emit !nonnull metadata
// References cannot be null in AS0 unless -fno-delete-null-pointer-checks
// style semantics (NullPointerIsValid) are in effect.
3397    if (CGM.getTypes().getTargetAddressSpace(PTy) == 0 &&
3398        !CGM.getCodeGenOpts().NullPointerIsValid)
3399      Load->setMetadata(llvm::LLVMContext::MD_nonnull,
3400                        llvm::MDNode::get(Ctx, {}));
3401    // Emit !align metadata
3402    if (PTy->isObjectType()) {
3403      auto AlignVal = Align.getQuantity();
3404      if (AlignVal > 1) {
3405        Load->setMetadata(
3406            llvm::LLVMContext::MD_align,
3407            llvm::MDNode::get(Ctx, MDB.createConstant(llvm::ConstantInt::get(
3408                                       Builder.getInt64Ty(), AlignVal))));
3409      }
3410    }
3411  }
3412  return makeNaturalAddressForPointer(Load, PTy, Align,
3413                                      /*ForPointeeType=*/true, PointeeBaseInfo,
3414                                      PointeeTBAAInfo);
3415}
3416
3418  LValueBaseInfo PointeeBaseInfo;
// Convenience wrapper: load the pointee address of a reference l-value and
// wrap it back up as an l-value of the pointee type.
// NOTE(review): the signature line (original 3417) is missing from this
// rendered listing.
3419  TBAAAccessInfo PointeeTBAAInfo;
3420  Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
3421                                            &PointeeTBAAInfo);
3422  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
3423                        PointeeBaseInfo, PointeeTBAAInfo);
3424}
3425
3427                                           const PointerType *PtrTy,
// Load a pointer value from `Ptr` and turn it into a naturally-aligned
// Address of the pointee type, optionally reporting base/TBAA info.
// NOTE(review): the first signature line (original 3426) is missing from this
// rendered listing.
3428                                           LValueBaseInfo *BaseInfo,
3429                                           TBAAAccessInfo *TBAAInfo) {
3430  llvm::Value *Addr = Builder.CreateLoad(Ptr);
3431  return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
3432                                      CharUnits(), /*ForPointeeType=*/true,
3433                                      BaseInfo, TBAAInfo);
3434}
3435
3437                                                const PointerType *PtrTy) {
// Convenience wrapper: load a pointer and wrap the result as an l-value of
// the pointee type.
// NOTE(review): the first signature line (original 3436) is missing from this
// rendered listing.
3438  LValueBaseInfo BaseInfo;
3439  TBAAAccessInfo TBAAInfo;
3440  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
3441  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
3442}
3443
3445                                      const Expr *E, const VarDecl *VD) {
// Build an l-value for a reference to a global variable, handling dynamic
// TLS wrappers, OpenMP declare-target/threadprivate variables, and global
// references, and classifying the result for ObjC GC.
// NOTE(review): the signature line (original 3444) and the lines at original
// 3450, 3455, 3457 and 3476-3478 are missing from this rendered listing.
3446  QualType T = E->getType();
3447
3448  // If it's thread_local, emit a call to its wrapper function instead.
3449  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
3451    return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
3452  // Check if the variable is marked as declare target with link clause in
3453  // device codegen.
3454  if (CGF.getLangOpts().OpenMPIsTargetDevice) {
3456    if (Addr.isValid())
3458  }
3459
3460  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
3461
3462  if (VD->getTLSKind() != VarDecl::TLS_None)
3463    V = CGF.Builder.CreateThreadLocalAddress(V);
3464
3465  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
3466  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
3467  Address Addr(V, RealVarTy, Alignment);
3468  // Emit reference to the private copy of the variable if it is an OpenMP
3469  // threadprivate variable.
3470  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
3471      VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3472    return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
3473                                          E->getExprLoc());
3474  }
// References need an extra load of the referent (missing lines 3476-3478).
3475  LValue LV = VD->getType()->isReferenceType() ?
3479  setObjCGCLValueClass(CGF.getContext(), E, LV);
3480  return LV;
3481}
3482
3484                                               llvm::Type *Ty) {
// Return the address of a function, resolving weakref aliases to their
// aliasee first.
// NOTE(review): the signature line (original 3483) and the line materializing
// `aliasee` (original 3487) are missing from this rendered listing.
3485  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3486  if (FD->hasAttr<WeakRefAttr>()) {
3488    return aliasee.getPointer();
3489  }
3490
3491  llvm::Constant *V = GetAddrOfFunction(GD, Ty);
3492  return V;
3493}
3494
// Build an l-value referring to a function declaration, wrapping the symbol
// in a NoCFIValue when required so indirect-call CFI checks are suppressed.
// NOTE(review): the condition line guarding the NoCFIValue wrap (original
// line 3500) is missing from this rendered listing.
3495static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
3496                                     GlobalDecl GD) {
3497  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3498  llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
3499  QualType ETy = E->getType();
3501    if (auto *GV = dyn_cast<llvm::GlobalValue>(V))
3502      V = llvm::NoCFIValue::get(GV);
3503  }
3504  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
3505  return CGF.MakeAddrLValue(V, ETy, Alignment, AlignmentSource::Decl);
3506}
3507
3509                                       llvm::Value *ThisValue) {
// Thin wrapper: an l-value for a lambda-captured field accessed through the
// given `this` value.
// NOTE(review): the signature line (original 3508) is missing from this
// rendered listing.
3510
3511  return CGF.EmitLValueForLambdaField(FD, ThisValue);
3512}
3513
3514/// Named Registers are named metadata pointing to the register name
3515/// which will be read from/written to as an argument to the intrinsic
3516/// @llvm.read/write_register.
3517/// So far, only the name is being passed down, but other options such as
3518/// register type, allocation type or even optimization options could be
3519/// passed down via the metadata node.
3520static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
3521 SmallString<64> Name("llvm.named.register.");
3522 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
3523 assert(Asm->getLabel().size() < 64-Name.size() &&
3524 "Register name too big");
3525 Name.append(Asm->getLabel());
3526 llvm::NamedMDNode *M =
3527 CGM.getModule().getOrInsertNamedMetadata(Name);
3528 if (M->getNumOperands() == 0) {
3529 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
3530 Asm->getLabel());
3531 llvm::Metadata *Ops[] = {Str};
3532 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
3533 }
3534
3535 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
3536
3537 llvm::Value *Ptr =
3538 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
3539 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
3540}
3541
3542/// Determine whether we can emit a reference to \p VD from the current
3543/// context, despite not necessarily having seen an odr-use of the variable in
3544/// this context.
// NOTE(review): the signature line (original 3545) and the enclosing-scope
// condition (original 3552) are missing from this rendered listing.
3546                                               const DeclRefExpr *E,
3547                                               const VarDecl *VD) {
3548  // For a variable declared in an enclosing scope, do not emit a spurious
3549  // reference even if we have a capture, as that will emit an unwarranted
3550  // reference to our capture state, and will likely generate worse code than
3551  // emitting a local copy.
3553    return false;
3554
3555  // For a local declaration declared in this function, we can always reference
3556  // it even if we don't have an odr-use.
3557  if (VD->hasLocalStorage()) {
3558    return VD->getDeclContext() ==
3559           dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
3560  }
3561
3562  // For a global declaration, we can emit a reference to it if we know
3563  // for sure that we are able to emit a definition of it.
3564  VD = VD->getDefinition(CGF.getContext());
3565  if (!VD)
3566    return false;
3567
3568  // Don't emit a spurious reference if it might be to a variable that only
3569  // exists on a different device / target.
3570  // FIXME: This is unnecessarily broad. Check whether this would actually be a
3571  // cross-target reference.
3572  if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
3573      CGF.getLangOpts().OpenCL) {
3574    return false;
3575  }
3576
3577  // We can emit a spurious reference only if the linkage implies that we'll
3578  // be emitting a non-interposable symbol that will be retained until link
3579  // time.
3580  switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3581  case llvm::GlobalValue::ExternalLinkage:
3582  case llvm::GlobalValue::LinkOnceODRLinkage:
3583  case llvm::GlobalValue::WeakODRLinkage:
3584  case llvm::GlobalValue::InternalLinkage:
3585  case llvm::GlobalValue::PrivateLinkage:
3586    return true;
3587  default:
3588    return false;
3589  }
3590}
3591
3593 const NamedDecl *ND = E->getDecl();
3594 QualType T = E->getType();
3595
3596 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3597 "should not emit an unevaluated operand");
3598
3599 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3600 // Global Named registers access via intrinsics only
3601 if (VD->getStorageClass() == SC_Register &&
3602 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3603 return EmitGlobalNamedRegister(VD, CGM);
3604
3605 // If this DeclRefExpr does not constitute an odr-use of the variable,
3606 // we're not permitted to emit a reference to it in general, and it might
3607 // not be captured if capture would be necessary for a use. Emit the
3608 // constant value directly instead.
3609 if (E->isNonOdrUse() == NOUR_Constant &&
3610 (VD->getType()->isReferenceType() ||
3611 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3612 VD->getAnyInitializer(VD);
3613 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3614 E->getLocation(), *VD->evaluateValue(), VD->getType());
3615 assert(Val && "failed to emit constant expression");
3616
3618 if (!VD->getType()->isReferenceType()) {
3619 // Spill the constant value to a global.
3620 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3621 getContext().getDeclAlign(VD));
3622 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3623 auto *PTy = llvm::PointerType::get(
3624 getLLVMContext(), getTypes().getTargetAddressSpace(VD->getType()));
3625 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3626 } else {
3627 // Should we be using the alignment of the constant pointer we emitted?
3628 CharUnits Alignment =
3629 CGM.getNaturalTypeAlignment(E->getType(),
3630 /* BaseInfo= */ nullptr,
3631 /* TBAAInfo= */ nullptr,
3632 /* forPointeeType= */ true);
3633 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3634 }
3636 }
3637
3638 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3639
3640 // Check for captured variables.
3642 VD = VD->getCanonicalDecl();
3643 if (auto *FD = LambdaCaptureFields.lookup(VD))
3644 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3645 if (CapturedStmtInfo) {
3646 auto I = LocalDeclMap.find(VD);
3647 if (I != LocalDeclMap.end()) {
3648 LValue CapLVal;
3649 if (VD->getType()->isReferenceType())
3650 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3652 else
3653 CapLVal = MakeAddrLValue(I->second, T);
3654 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3655 // in simd context.
3656 if (getLangOpts().OpenMP &&
3657 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3658 CapLVal.setNontemporal(/*Value=*/true);
3659 return CapLVal;
3660 }
3661 LValue CapLVal =
3662 EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
3663 CapturedStmtInfo->getContextValue());
3664 Address LValueAddress = CapLVal.getAddress();
3665 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3666 LValueAddress.getElementType(),
3667 getContext().getDeclAlign(VD)),
3668 CapLVal.getType(),
3670 CapLVal.getTBAAInfo());
3671 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3672 // in simd context.
3673 if (getLangOpts().OpenMP &&
3674 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3675 CapLVal.setNontemporal(/*Value=*/true);
3676 return CapLVal;
3677 }
3678
3679 assert(isa<BlockDecl>(CurCodeDecl));
3680 Address addr = GetAddrOfBlockDecl(VD);
3681 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3682 }
3683 }
3684
3685 // FIXME: We should be able to assert this for FunctionDecls as well!
3686 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3687 // those with a valid source location.
3688 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3689 !E->getLocation().isValid()) &&
3690 "Should not use decl without marking it used!");
3691
3692 if (ND->hasAttr<WeakRefAttr>()) {
3693 const auto *VD = cast<ValueDecl>(ND);
3694 ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
3695 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3696 }
3697
3698 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3699 // Check if this is a global variable.
3700 if (VD->hasLinkage() || VD->isStaticDataMember())
3701 return EmitGlobalVarDeclLValue(*this, E, VD);
3702
3703 Address addr = Address::invalid();
3704
3705 // The variable should generally be present in the local decl map.
3706 auto iter = LocalDeclMap.find(VD);
3707 if (iter != LocalDeclMap.end()) {
3708 addr = iter->second;
3709
3710 // Otherwise, it might be static local we haven't emitted yet for
3711 // some reason; most likely, because it's in an outer function.
3712 } else if (VD->isStaticLocal()) {
3713 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3714 *VD, CGM.getLLVMLinkageVarDefinition(VD));
3715 addr = Address(
3716 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3717
3718 // No other cases for now.
3719 } else {
3720 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3721 }
3722
3723 // Handle threadlocal function locals.
3724 if (VD->getTLSKind() != VarDecl::TLS_None)
3725 addr = addr.withPointer(
3726 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3728
3729 // Check for OpenMP threadprivate variables.
3730 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3731 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3733 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3734 E->getExprLoc());
3735 }
3736
3737 // Drill into block byref variables.
3738 bool isBlockByref = VD->isEscapingByref();
3739 if (isBlockByref) {
3740 addr = emitBlockByrefAddress(addr, VD);
3741 }
3742
3743 // Drill into reference types.
3744 LValue LV = VD->getType()->isReferenceType() ?
3747
3748 bool isLocalStorage = VD->hasLocalStorage();
3749
3750 bool NonGCable = isLocalStorage &&
3751 !VD->getType()->isReferenceType() &&
3752 !isBlockByref;
3753 if (NonGCable) {
3755 LV.setNonGC(true);
3756 }
3757
3758 bool isImpreciseLifetime =
3759 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3760 if (isImpreciseLifetime)
3763 return LV;
3764 }
3765
3766 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3767 return EmitFunctionDeclLValue(*this, E, FD);
3768
3769 // FIXME: While we're emitting a binding from an enclosing scope, all other
3770 // DeclRefExprs we see should be implicitly treated as if they also refer to
3771 // an enclosing scope.
3772 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3774 auto *FD = LambdaCaptureFields.lookup(BD);
3775 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3776 }
3777 // Suppress debug location updates when visiting the binding, since the
3778 // binding may emit instructions that would otherwise be associated with the
3779 // binding itself, rather than the expression referencing the binding. (this
3780 // leads to jumpy debug stepping behavior where the location/debugger jump
3781 // back to the binding declaration, then back to the expression referencing
3782 // the binding)
3784 return EmitLValue(BD->getBinding(), NotKnownNonNull);
3785 }
3786
3787 // We can form DeclRefExprs naming GUID declarations when reconstituting
3788 // non-type template parameters into expressions.
3789 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3790 return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
3792
3793 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3794 ConstantAddress ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3795 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3796
3797 if (AS != T.getAddressSpace()) {
3798 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3799 llvm::Type *PtrTy =
3800 llvm::PointerType::get(CGM.getLLVMContext(), TargetAS);
3801 llvm::Constant *ASC = CGM.performAddrSpaceCast(ATPO.getPointer(), PtrTy);
3802 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3803 }
3804
3805 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3806 }
3807
3808 llvm_unreachable("Unhandled DeclRefExpr");
3809}
3810
// Body of CodeGenFunction::EmitUnaryOpLValue: produces an lvalue for a
// unary operator — UO_Extension forwards to the sub-expression; UO_Deref,
// UO_Real/UO_Imag and UO_PreInc/UO_PreDec are handled in the switch below.
// NOTE(review): this is a rendered-source extraction. The leading numbers
// are the original file's line numbers; non-contiguous numbers (3811, 3816,
// 3820, 3825, 3828, 3837, 3857-3858) mark lines — including the function
// signature — lost in extraction. Restore from upstream before editing.
3812 // __extension__ doesn't affect lvalue-ness.
3813 if (E->getOpcode() == UO_Extension)
3814 return EmitLValue(E->getSubExpr());
3815
3817 switch (E->getOpcode()) {
3818 default: llvm_unreachable("Unknown unary operator lvalue!");
3819 case UO_Deref: {
3821 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3822
3823 LValueBaseInfo BaseInfo;
3824 TBAAAccessInfo TBAAInfo;
3826 &TBAAInfo);
3827 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3829
3830 // We should not generate __weak write barrier on indirect reference
3831 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3832 // But, we continue to generate __strong write barrier on indirect write
3833 // into a pointer to object.
3834 if (getLangOpts().ObjC &&
3835 getLangOpts().getGC() != LangOptions::NonGC &&
3836 LV.isObjCWeak())
3838 return LV;
3839 }
3840 case UO_Real:
3841 case UO_Imag: {
3842 LValue LV = EmitLValue(E->getSubExpr());
3843 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3844
3845 // __real is valid on scalars. This is a faster way of testing that.
3846 // __imag can only produce an rvalue on scalars.
3847 if (E->getOpcode() == UO_Real &&
3848 !LV.getAddress().getElementType()->isStructTy()) {
3849 assert(E->getSubExpr()->getType()->isArithmeticType());
3850 return LV;
3851 }
3852
3853 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3854
// Project out the requested component of the complex value; the
// component lvalue inherits base info, TBAA and qualifiers from LV.
3855 Address Component =
3856 (E->getOpcode() == UO_Real
3859 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3860 CGM.getTBAAInfoForSubobject(LV, T));
3861 ElemLV.getQuals().addQualifiers(LV.getQuals());
3862 return ElemLV;
3863 }
3864 case UO_PreInc:
3865 case UO_PreDec: {
// Pre-increment/decrement: emit the update, then yield the same lvalue.
3866 LValue LV = EmitLValue(E->getSubExpr());
3867 bool isInc = E->getOpcode() == UO_PreInc;
3868
3869 if (E->getType()->isAnyComplexType())
3870 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3871 else
3872 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3873 return LV;
3874 }
3875 }
3876}
3877
// Body of EmitStringLiteralLValue: wraps the literal's global constant in an
// lvalue. NOTE(review): the signature (3878) and the second MakeAddrLValue
// argument line (3880) were lost in extraction.
3879 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
3881}
3882
// Body of EmitObjCEncodeExprLValue: wraps the @encode string constant in an
// lvalue. NOTE(review): the signature (3883) and the second MakeAddrLValue
// argument line (3885) were lost in extraction.
3884 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
3886}
3887
// Body of EmitPredefinedLValue (__func__ and friends): builds a global
// constant C-string for the function name. Inside a block, a discriminator
// from the mangling context is appended when the block id is nonzero.
// NOTE(review): rendered-source extraction — the signature (3888) and the
// return lines 3894/3904/3907/3911 were lost; leading numbers are original
// line numbers.
3889 auto SL = E->getFunctionName();
3890 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
// Strip the '\01' literal-name marker some manglings prepend.
3891 StringRef FnName = CurFn->getName();
3892 FnName.consume_front("\01");
3893 StringRef NameItems[] = {
3895 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3896 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3897 std::string Name = std::string(SL->getString());
3898 if (!Name.empty()) {
3899 unsigned Discriminator =
3900 CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3901 if (Discriminator)
3902 Name += "_" + Twine(Discriminator + 1).str();
3903 auto C = CGM.GetAddrOfConstantCString(Name, GVName);
3905 } else {
3906 auto C = CGM.GetAddrOfConstantCString(std::string(FnName), GVName);
3908 }
3909 }
3910 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3912}
3913
3914/// Emit a type description suitable for use by a runtime sanitizer library. The
3915/// format of a type descriptor is
3916///
3917/// \code
3918/// { i16 TypeKind, i16 TypeInfo }
3919/// \endcode
3920///
3921/// followed by an array of i8 containing the type name with extra information
3922/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3923/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3924/// anything else.
// Body of EmitCheckTypeDescriptor (see the doc comment preceding it):
// builds and caches a { i16 TypeKind, i16 TypeInfo, [N x i8] name } global
// describing T for the UBSan runtime.
// NOTE(review): rendered-source extraction — the signature (3925) and the
// floating-point TypeInfo assignment (3955) were lost; leading numbers are
// original line numbers.
3926 // Only emit each type's descriptor once.
3927 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3928 return C;
3929
3930 uint16_t TypeKind = TK_Unknown;
3931 uint16_t TypeInfo = 0;
3932 bool IsBitInt = false;
3933
3934 if (T->isIntegerType()) {
3935 TypeKind = TK_Integer;
// TypeInfo packs log2(bit width) in the upper bits and signedness in bit 0.
3936 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3937 (T->isSignedIntegerType() ? 1 : 0);
3938 // Follow suggestion from discussion of issue 64100.
3939 // So we can write the exact amount of bits in TypeName after '\0'
3940 // making it <diagnostic-like type name>.'\0'.<32-bit width>.
3941 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3942 // Do a sanity checks as we are using 32-bit type to store bit length.
3943 assert(getContext().getTypeSize(T) > 0 &&
3944 " non positive amount of bits in __BitInt type");
3945 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3946 " too many bits in __BitInt type");
3947
3948 // Redefine TypeKind with the actual __BitInt type if we have signed
3949 // BitInt.
3950 TypeKind = TK_BitInt;
3951 IsBitInt = true;
3952 }
3953 } else if (T->isFloatingType()) {
3954 TypeKind = TK_Float;
3956 }
3957
3958 // Format the type name as if for a diagnostic, including quotes and
3959 // optionally an 'aka'.
3960 SmallString<32> Buffer;
3961 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
3962 (intptr_t)T.getAsOpaquePtr(), StringRef(),
3963 StringRef(), {}, Buffer, {});
3964
3965 if (IsBitInt) {
3966 // The Structure is: 0 to end the string, 32 bit unsigned integer in target
3967 // endianness, zero.
3968 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3969 const auto *EIT = T->castAs<BitIntType>();
3970 uint32_t Bits = EIT->getNumBits();
3971 llvm::support::endian::write32(S + 1, Bits,
3972 getTarget().isBigEndian()
3973 ? llvm::endianness::big
3974 : llvm::endianness::little);
3975 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3976 Buffer.append(Str);
3977 }
3978
3979 llvm::Constant *Components[] = {
3980 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3981 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3982 };
3983 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3984
// The descriptor is private, unnamed_addr, and excluded from
// instrumentation so the runtime can read it without tripping checks.
3985 auto *GV = new llvm::GlobalVariable(
3986 CGM.getModule(), Descriptor->getType(),
3987 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3988 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3989 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3990
3991 // Remember the descriptor for this type.
3992 CGM.setTypeDescriptorInMap(T, GV);
3993
3994 return GV;
3995}
3996
3997llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3998 llvm::Type *TargetTy = IntPtrTy;
3999
4000 if (V->getType() == TargetTy)
4001 return V;
4002
4003 // Floating-point types which fit into intptr_t are bitcast to integers
4004 // and then passed directly (after zero-extension, if necessary).
4005 if (V->getType()->isFloatingPointTy()) {
4006 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
4007 if (Bits <= TargetTy->getIntegerBitWidth())
4008 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
4009 Bits));
4010 }
4011
4012 // Integers which fit in intptr_t are zero-extended and passed directly.
4013 if (V->getType()->isIntegerTy() &&
4014 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
4015 return Builder.CreateZExt(V, TargetTy);
4016
4017 // Pointers are passed directly, everything else is passed by address.
4018 if (!V->getType()->isPointerTy()) {
4019 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
4020 Builder.CreateStore(V, Ptr);
4021 V = Ptr.getPointer();
4022 }
4023 return Builder.CreatePtrToInt(V, TargetTy);
4024}
4025
4026/// Emit a representation of a SourceLocation for passing to a handler
4027/// in a sanitizer runtime library. The format for this data is:
4028/// \code
4029/// struct SourceLocation {
4030/// const char *Filename;
4031/// int32_t Line, Column;
4032/// };
4033/// \endcode
4034/// For an invalid SourceLocation, the Filename pointer is null.
// Body of EmitCheckSourceLocation (see the doc comment preceding it):
// builds the { const char *Filename, i32 Line, i32 Column } constant the
// sanitizer runtime consumes; an invalid location yields a null Filename.
// NOTE(review): rendered-source extraction — the signature (4035), the
// PresumedLoc initialization (4039) and line 4070 were lost; leading
// numbers are original line numbers.
4036 llvm::Constant *Filename;
4037 int Line, Column;
4038
4040 if (PLoc.isValid()) {
4041 StringRef FilenameString = PLoc.getFilename();
4042
// -fsanitize-undefined-strip-path-components: negative keeps the last N
// components, positive strips the first N.
4043 int PathComponentsToStrip =
4044 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
4045 if (PathComponentsToStrip < 0) {
4046 assert(PathComponentsToStrip != INT_MIN);
4047 int PathComponentsToKeep = -PathComponentsToStrip;
4048 auto I = llvm::sys::path::rbegin(FilenameString);
4049 auto E = llvm::sys::path::rend(FilenameString);
4050 while (I != E && --PathComponentsToKeep)
4051 ++I;
4052
4053 FilenameString = FilenameString.substr(I - E);
4054 } else if (PathComponentsToStrip > 0) {
4055 auto I = llvm::sys::path::begin(FilenameString);
4056 auto E = llvm::sys::path::end(FilenameString);
4057 while (I != E && PathComponentsToStrip--)
4058 ++I;
4059
4060 if (I != E)
4061 FilenameString =
4062 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
4063 else
// Asked to strip everything: fall back to the bare filename.
4064 FilenameString = llvm::sys::path::filename(FilenameString);
4065 }
4066
4067 auto FilenameGV =
4068 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
4069 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
4071 FilenameGV.getPointer()->stripPointerCasts()));
4072 Filename = FilenameGV.getPointer();
4073 Line = PLoc.getLine();
4074 Column = PLoc.getColumn();
4075 } else {
4076 Filename = llvm::Constant::getNullValue(Int8PtrTy);
4077 Line = Column = 0;
4078 }
4079
4080 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
4081 Builder.getInt32(Column)};
4082
4083 return llvm::ConstantStruct::getAnon(Data);
4084}
4085
// Recovery policy for a sanitizer check; consumed by getRecoverableKind and
// emitCheckHandlerCall below.
// NOTE(review): rendered-source extraction — the enumerator lines 4090 and
// 4095 were lost; judging from the uses below they are presumably
// `Unrecoverable,` and `AlwaysRecoverable` — confirm against upstream.
4086namespace {
4087/// Specify under what conditions this check can be recovered
4088enum class CheckRecoverableKind {
4089 /// Always terminate program execution if this check fails.
4091 /// Check supports recovering, runtime has both fatal (noreturn) and
4092 /// non-fatal handlers for this check.
4093 Recoverable,
4094 /// Runtime conditionally aborts, always need to support recovery.
4096};
4097}
4098
// Maps a sanitizer ordinal to its recovery policy: vptr checks are always
// recoverable, return/unreachable can never recover, everything else is
// ordinarily recoverable.
// NOTE(review): the parameter line (4100, presumably
// `getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal) {`) was lost
// in extraction.
4099static CheckRecoverableKind
4101 if (Ordinal == SanitizerKind::SO_Vptr)
4102 return CheckRecoverableKind::AlwaysRecoverable;
4103 else if (Ordinal == SanitizerKind::SO_Return ||
4104 Ordinal == SanitizerKind::SO_Unreachable)
4105 return CheckRecoverableKind::Unrecoverable;
4106 else
4107 return CheckRecoverableKind::Recoverable;
4108}
4109
// One entry per sanitizer handler: the runtime function's base name (used to
// form "__ubsan_handle_<Name>") and an ABI version appended as "_v<N>" when
// nonzero (see emitCheckHandlerCall below).
4110namespace {
4111struct SanitizerHandlerInfo {
4112 char const *const Name;
4113 unsigned Version;
4114};
4115}
4116
// Table of handler name/version pairs, indexed by SanitizerHandler; filled
// in by expanding the SANITIZER_CHECK X-macro. NOTE(review): the line
// between the #define and the #undef (4119, presumably the include of the
// sanitizer-check list) was lost in extraction.
4117const SanitizerHandlerInfo SanitizerHandlers[] = {
4118#define SANITIZER_CHECK(Enum, Name, Version, Msg) {#Name, Version},
4120#undef SANITIZER_CHECK
4121};
4122
// Body of the static helper emitCheckHandlerCall: computes the runtime
// handler's mangled name ("__ubsan_handle_<Name>[_vN][_minimal][_abort|
// _preserve]"), declares it with the right attributes, emits the call, and
// terminates the block with either unreachable (fatal, non-recoverable) or
// a branch to ContBB.
// NOTE(review): rendered-source extraction — the first signature line
// (4123) and the FnArgs parameter line (4125) were lost; leading numbers
// are original line numbers.
4124 llvm::FunctionType *FnType,
4126 SanitizerHandler CheckHandler,
4127 CheckRecoverableKind RecoverKind, bool IsFatal,
4128 llvm::BasicBlock *ContBB, bool NoMerge) {
4129 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
4130 std::optional<ApplyDebugLocation> DL;
4131 if (!CGF.Builder.getCurrentDebugLocation()) {
4132 // Ensure that the call has at least an artificial debug location.
4133 DL.emplace(CGF, SourceLocation());
4134 }
// Unrecoverable checks have no separate "_abort" flavor — the plain
// handler already never returns.
4135 bool NeedsAbortSuffix =
4136 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
4137 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
4138 bool HandlerPreserveAllRegs =
4139 CGF.CGM.getCodeGenOpts().SanitizeHandlerPreserveAllRegs;
4140 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
4141 const StringRef CheckName = CheckInfo.Name;
4142 std::string FnName = "__ubsan_handle_" + CheckName.str();
4143 if (CheckInfo.Version && !MinimalRuntime)
4144 FnName += "_v" + llvm::utostr(CheckInfo.Version);
4145 if (MinimalRuntime)
4146 FnName += "_minimal";
4147 if (NeedsAbortSuffix)
4148 FnName += "_abort";
4149 if (HandlerPreserveAllRegs && !NeedsAbortSuffix)
4150 FnName += "_preserve";
4151 bool MayReturn =
4152 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
4153
4154 llvm::AttrBuilder B(CGF.getLLVMContext());
4155 if (!MayReturn) {
4156 B.addAttribute(llvm::Attribute::NoReturn)
4157 .addAttribute(llvm::Attribute::NoUnwind);
4158 }
4159 B.addUWTableAttr(llvm::UWTableKind::Default);
4160
4161 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
4162 FnType, FnName,
4163 llvm::AttributeList::get(CGF.getLLVMContext(),
4164 llvm::AttributeList::FunctionIndex, B),
4165 /*Local=*/true);
4166 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
// Handler calls are kept distinct (NoMerge) in unoptimized or
// optnone code so each failure keeps a precise location.
4167 NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().isOptimizedBuild() ||
4168 (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
4169 if (NoMerge)
4170 HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
4171 if (HandlerPreserveAllRegs && !NeedsAbortSuffix) {
4172 // N.B. there is also a clang::CallingConv which is not what we want here.
4173 HandlerCall->setCallingConv(llvm::CallingConv::PreserveAll);
4174 }
4175 if (!MayReturn) {
4176 HandlerCall->setDoesNotReturn();
4177 CGF.Builder.CreateUnreachable();
4178 } else {
4179 CGF.Builder.CreateBr(ContBB);
4180 }
4181}
4182
// Body of CodeGenFunction::EmitCheck: combines the per-ordinal conditions
// into trap / fatal / recoverable groups, emits an EmitTrapCheck for the
// trap group, and routes the rest through one or two runtime handler calls
// via emitCheckHandlerCall.
// NOTE(review): rendered-source extraction — the signature line (4183) and
// the Args/ArgTypes declarations (4264-4265) were lost; leading numbers are
// original line numbers.
4184 ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
4185 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
4186 ArrayRef<llvm::Value *> DynamicArgs, const TrapReason *TR) {
4187 assert(IsSanitizerScope);
4188 assert(Checked.size() > 0);
4189 assert(CheckHandler >= 0 &&
4190 size_t(CheckHandler) < std::size(SanitizerHandlers));
4191 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
4192
4193 llvm::Value *FatalCond = nullptr;
4194 llvm::Value *RecoverableCond = nullptr;
4195 llvm::Value *TrapCond = nullptr;
4196 bool NoMerge = false;
4197 // Expand checks into:
4198 // (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ...
4199 // We need separate allow_ubsan_check intrinsics because they have separately
4200 // specified cutoffs.
4201 // This expression looks expensive but will be simplified after
4202 // LowerAllowCheckPass.
4203 for (auto &[Check, Ord] : Checked) {
4204 llvm::Value *GuardedCheck = Check;
4206 (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] > 0)) {
4207 llvm::Value *Allow = Builder.CreateCall(
4208 CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
4209 llvm::ConstantInt::get(CGM.Int8Ty, Ord));
4210 GuardedCheck = Builder.CreateOr(Check, Builder.CreateNot(Allow));
4211 }
4212
4213 // -fsanitize-trap= overrides -fsanitize-recover=.
4214 llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
4215 : CGM.getCodeGenOpts().SanitizeRecover.has(Ord)
4216 ? RecoverableCond
4217 : FatalCond;
4218 Cond = Cond ? Builder.CreateAnd(Cond, GuardedCheck) : GuardedCheck;
4219
4220 if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Ord))
4221 NoMerge = true;
4222 }
4223
4224 if (TrapCond)
4225 EmitTrapCheck(TrapCond, CheckHandler, NoMerge, TR);
4226 if (!FatalCond && !RecoverableCond)
4227 return;
4228
// JointCond passes only when every grouped check passes.
4229 llvm::Value *JointCond;
4230 if (FatalCond && RecoverableCond)
4231 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
4232 else
4233 JointCond = FatalCond ? FatalCond : RecoverableCond;
4234 assert(JointCond);
4235
4236 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
4237 assert(SanOpts.has(Checked[0].second));
4238#ifndef NDEBUG
4239 for (int i = 1, n = Checked.size(); i < n; ++i) {
4240 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
4241 "All recoverable kinds in a single check must be same!");
4242 assert(SanOpts.has(Checked[i].second));
4243 }
4244#endif
4245
4246 llvm::BasicBlock *Cont = createBasicBlock("cont");
4247 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
4248 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
4249 // Give hint that we very much don't expect to execute the handler
4250 llvm::MDBuilder MDHelper(getLLVMContext());
4251 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
4252 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
4253 EmitBlock(Handlers);
4254
4255 // Clear arguments for the MinimalRuntime handler.
4256 if (CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
4257 StaticArgs = {};
4258 DynamicArgs = {};
4259 }
4260
4261 // Handler functions take an i8* pointing to the (handler-specific) static
4262 // information block, followed by a sequence of intptr_t arguments
4263 // representing operand values.
4266
4267 Args.reserve(DynamicArgs.size() + 1);
4268 ArgTypes.reserve(DynamicArgs.size() + 1);
4269
4270 // Emit handler arguments and create handler function type.
4271 if (!StaticArgs.empty()) {
4272 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
4273 auto *InfoPtr = new llvm::GlobalVariable(
4274 CGM.getModule(), Info->getType(),
4275 // Non-constant global is used in a handler to deduplicate reports.
4276 // TODO: change deduplication logic and make it constant.
4277 /*isConstant=*/false, llvm::GlobalVariable::PrivateLinkage, Info, "",
4278 nullptr, llvm::GlobalVariable::NotThreadLocal,
4279 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
4280 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4281 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
4282 Args.push_back(InfoPtr);
4283 ArgTypes.push_back(Args.back()->getType());
4284 }
4285
// Every dynamic operand is normalized to intptr_t via EmitCheckValue.
4286 for (llvm::Value *DynamicArg : DynamicArgs) {
4287 Args.push_back(EmitCheckValue(DynamicArg));
4288 ArgTypes.push_back(IntPtrTy);
4289 }
4290
4291 llvm::FunctionType *FnType =
4292 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
4293
4294 if (!FatalCond || !RecoverableCond) {
4295 // Simple case: we need to generate a single handler call, either
4296 // fatal, or non-fatal.
4297 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
4298 (FatalCond != nullptr), Cont, NoMerge);
4299 } else {
4300 // Emit two handler calls: first one for set of unrecoverable checks,
4301 // another one for recoverable.
4302 llvm::BasicBlock *NonFatalHandlerBB =
4303 createBasicBlock("non_fatal." + CheckName);
4304 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
4305 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
4306 EmitBlock(FatalHandlerBB);
4307 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
4308 NonFatalHandlerBB, NoMerge);
4309 EmitBlock(NonFatalHandlerBB);
4310 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
4311 Cont, NoMerge);
4312 }
4313
4314 EmitBlock(Cont);
4315}
4316
// Body of EmitCfiSlowPathCheck (cross-DSO CFI): when the fast-path Cond
// fails, calls __cfi_slowpath_diag (with a static diagnostic blob) or
// __cfi_slowpath, depending on whether the check is in trap mode.
// NOTE(review): rendered-source extraction — the signature line (4317) was
// lost; leading numbers are original line numbers.
4318 SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
4319 llvm::ConstantInt *TypeId, llvm::Value *Ptr,
4320 ArrayRef<llvm::Constant *> StaticArgs) {
4321 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
4322
4323 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
4324 llvm::CondBrInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
4325
// Annotate the branch as strongly expected to take the fast path.
4326 llvm::MDBuilder MDHelper(getLLVMContext());
4327 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
4328 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
4329
4330 EmitBlock(CheckBB);
4331
4332 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);
4333
4334 llvm::CallInst *CheckCall;
4335 llvm::FunctionCallee SlowPathFn;
4336 if (WithDiag) {
4337 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
4338 auto *InfoPtr =
4339 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
4340 llvm::GlobalVariable::PrivateLinkage, Info);
4341 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4342 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
4343
4344 SlowPathFn = CGM.getModule().getOrInsertFunction(
4345 "__cfi_slowpath_diag",
4346 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
4347 false));
4348 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
4349 } else {
4350 SlowPathFn = CGM.getModule().getOrInsertFunction(
4351 "__cfi_slowpath",
4352 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
4353 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
4354 }
4355
4356 CGM.setDSOLocal(
4357 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
4358 CheckCall->setDoesNotThrow();
4359
4360 EmitBlock(Cont);
4361}
4362
4363// Emit a stub for __cfi_check function so that the linker knows about this
4364// symbol in LTO mode.
// Body of EmitCfiCheckStub (see comment above): emits a weak, page-aligned
// __cfi_check definition whose body just calls __cfi_check_fail, so the
// linker sees the symbol before the CrossDSOCFI LTO pass rewrites it.
// NOTE(review): rendered-source extraction — the signature (4365), the
// FnArgs declaration (4370) and line 4374 were lost; leading numbers are
// original line numbers.
4366 llvm::Module *M = &CGM.getModule();
4367 ASTContext &C = getContext();
4368 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
4369
4371 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
4372 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
4373 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
4375 FnArgs.push_back(&ArgCallsiteTypeId);
4376 FnArgs.push_back(&ArgAddr);
4377 FnArgs.push_back(&ArgCFICheckFailData);
4378 const CGFunctionInfo &FI =
4379 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
4380
4381 llvm::Function *F = llvm::Function::Create(
4382 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
4383 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
4384 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
4385 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
4386 F->setAlignment(llvm::Align(4096));
4387 CGM.setDSOLocal(F);
4388
4389 llvm::LLVMContext &Ctx = M->getContext();
4390 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
4391 // CrossDSOCFI pass is not executed if there is no executable code.
4392 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
4393 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
4394 llvm::ReturnInst::Create(Ctx, nullptr, BB);
4395}
4396
4397// This function is basically a switch over the CFI failure kind, which is
4398// extracted from CFICheckFailData (1st function argument). Each case is either
4399// llvm.trap or a call to one of the two runtime handlers, based on
4400// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
4401// failure kind) traps, but this should really never happen. CFICheckFailData
4402// can be nullptr if the calling module has -fsanitize-trap behavior for this
4403// check kind; in this case __cfi_check_fail traps as well.
// Body of EmitCfiCheckFail (see comment above): synthesizes the weak_odr
// hidden __cfi_check_fail(Data, Addr) function, which traps when Data is
// null and otherwise dispatches on the CFI check kind stored in Data,
// emitting either an EmitCheck (diagnostic) or a trap per kind.
// NOTE(review): rendered-source extraction — the signature (4404), the
// ArgData/ArgAddr ImplicitParamDecl lines (4417-4420), line 4438 and the
// FinishFunction call (4507) were lost; leading numbers are original line
// numbers.
4405 auto CheckHandler = SanitizerHandler::CFICheckFail;
4406 // TODO: the SanitizerKind is not yet determined for this check (and might
4407 // not even be available, if Data == nullptr). However, we still want to
4408 // annotate the instrumentation. We approximate this by using all the CFI
4409 // kinds.
4410 SanitizerDebugLocation SanScope(
4411 this,
4412 {SanitizerKind::SO_CFIVCall, SanitizerKind::SO_CFINVCall,
4413 SanitizerKind::SO_CFIDerivedCast, SanitizerKind::SO_CFIUnrelatedCast,
4414 SanitizerKind::SO_CFIICall},
4415 CheckHandler);
4416 FunctionArgList Args;
4421 Args.push_back(&ArgData);
4422 Args.push_back(&ArgAddr);
4423
4424 const CGFunctionInfo &FI =
4425 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
4426
4427 llvm::Function *F = llvm::Function::Create(
4428 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
4429 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
4430
4431 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
4432 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
4433 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
4434
4435 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
4436 SourceLocation());
4437
4439
4440 // This function is not affected by NoSanitizeList. This function does
4441 // not have a source location, but "src:*" would still apply. Revert any
4442 // changes to SanOpts made in StartFunction.
4443 SanOpts = CGM.getLangOpts().Sanitize;
4444
4445 llvm::Value *Data =
4446 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
4447 CGM.getContext().VoidPtrTy, ArgData.getLocation());
4448 llvm::Value *Addr =
4449 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
4450 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
4451
4452 // Data == nullptr means the calling module has trap behaviour for this check.
4453 llvm::Value *DataIsNotNullPtr =
4454 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
4455 // TODO: since there is no data, we don't know the CheckKind, and therefore
4456 // cannot inspect CGM.getCodeGenOpts().SanitizeMergeHandlers. We default to
4457 // NoMerge = false. Users can disable merging by disabling optimization.
4458 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail,
4459 /*NoMerge=*/false);
4460
// Mirror of the runtime's CFICheckFailData layout: { i8 kind,
// SourceLocation, void* type }; only the leading kind byte is read here.
4461 llvm::StructType *SourceLocationTy =
4462 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
4463 llvm::StructType *CfiCheckFailDataTy =
4464 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
4465
4466 llvm::Value *V = Builder.CreateConstGEP2_32(
4467 CfiCheckFailDataTy, Builder.CreatePointerCast(Data, DefaultPtrTy), 0, 0);
4468
4469 Address CheckKindAddr(V, Int8Ty, getIntAlign());
4470 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
4471
4472 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
4473 CGM.getLLVMContext(),
4474 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
4475 llvm::Value *ValidVtable = Builder.CreateZExt(
4476 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
4477 {Addr, AllVtables}),
4478 IntPtrTy);
4479
4480 const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
4481 {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
4482 {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
4483 {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
4484 {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
4485 {CFITCK_ICall, SanitizerKind::SO_CFIICall}};
4486
4487 for (auto CheckKindOrdinalPair : CheckKinds) {
4488 int Kind = CheckKindOrdinalPair.first;
4489 SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
4490
4491 // TODO: we could apply SanitizerAnnotateDebugInfo(Ordinal) instead of
4492 // relying on the SanitizerScope with all CFI ordinals
4493
// Cond is "this failure is NOT of kind Kind"; EmitCheck/EmitTrapCheck
// fire on the false (matching) path.
4494 llvm::Value *Cond =
4495 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
4496 if (CGM.getLangOpts().Sanitize.has(Ordinal))
4497 EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
4498 {}, {Data, Addr, ValidVtable});
4499 else
4500 // TODO: we can't rely on CGM.getCodeGenOpts().SanitizeMergeHandlers.
4501 // Although the compiler allows SanitizeMergeHandlers to be set
4502 // independently of CGM.getLangOpts().Sanitize, Driver/SanitizerArgs.cpp
4503 // requires that SanitizeMergeHandlers is a subset of Sanitize.
4504 EmitTrapCheck(Cond, CheckHandler, /*NoMerge=*/false);
4505 }
4506
4508 // The only reference to this function will be created during LTO link.
4509 // Make sure it survives until then.
4510 CGM.addUsedGlobal(F);
4511}
4512
// Body of EmitUnreachable: with -fsanitize=unreachable, emits an
// always-failing check (Builder.getFalse()) through the BuiltinUnreachable
// handler before the IR `unreachable`. NOTE(review): the signature line
// (4513) was lost in extraction.
4514 if (SanOpts.has(SanitizerKind::Unreachable)) {
4515 auto CheckOrdinal = SanitizerKind::SO_Unreachable;
4516 auto CheckHandler = SanitizerHandler::BuiltinUnreachable;
4517 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4518 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
4519 CheckOrdinal),
4520 CheckHandler, EmitCheckSourceLocation(Loc), {});
4521 }
4522 Builder.CreateUnreachable();
4523}
4524
// EmitTrapCheck: branch on `Checked`; if it is false, jump to a trap basic
// block that calls llvm.ubsantrap (or llvm.looptrap). Trap blocks are cached
// per CheckHandlerID so optimized builds can share one trap per check kind;
// `TR`, when provided and enabled, attaches a human-readable failure reason
// to the trap's debug location.
4525void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
4526 SanitizerHandler CheckHandlerID,
4527 bool NoMerge, const TrapReason *TR) {
4528 llvm::BasicBlock *Cont = createBasicBlock("cont");
4529
4530 // If we're optimizing, collapse all calls to trap down to just one per
4531 // check-type per function to save on code size.
4532 if ((int)TrapBBs.size() <= CheckHandlerID)
4533 TrapBBs.resize(CheckHandlerID + 1);
4534
4535 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
4536
4537 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
4538 llvm::StringRef TrapMessage;
4539 llvm::StringRef TrapCategory;
4540 auto DebugTrapReasonKind = CGM.getCodeGenOpts().getSanitizeDebugTrapReasons();
// Prefer the caller-supplied trap reason when present and the matching
// -fsanitize-debug-trap-reasons mode is selected; otherwise fall back to the
// generic UBSan message for this handler.
// NOTE(review): the enumerator on the right-hand side of this comparison
// (original line 4543) is not visible in this view.
4541 if (TR && !TR->isEmpty() &&
4542 DebugTrapReasonKind ==
4544 TrapMessage = TR->getMessage();
4545 TrapCategory = TR->getCategory();
4546 } else {
4547 TrapMessage = GetUBSanTrapForHandler(CheckHandlerID);
4548 TrapCategory = "Undefined Behavior Sanitizer";
4549 }
4550
// Fold the category/message into the trap's debug location so debuggers can
// show why the trap fired.
// NOTE(review): original line 4553 (the compared enumerator) is missing here.
4551 if (getDebugInfo() && !TrapMessage.empty() &&
4552 DebugTrapReasonKind !=
4554 TrapLocation) {
4555 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
4556 TrapLocation, TrapCategory, TrapMessage);
4557 }
4558
// Merging trap blocks is only worthwhile in optimized builds; optnone
// functions keep one trap per site for debuggability.
4559 NoMerge = NoMerge || !CGM.getCodeGenOpts().isOptimizedBuild() ||
4560 (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
4561
4562 llvm::MDBuilder MDHelper(getLLVMContext());
4563 if (TrapBB && !NoMerge) {
// Reuse the cached trap block; merge this site's debug location into the
// existing trap call's location.
4564 auto Call = TrapBB->begin();
4565 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
4566
4567 Call->applyMergedLocation(Call->getDebugLoc(), TrapLocation);
4568
4569 Builder.CreateCondBr(Checked, Cont, TrapBB,
4570 MDHelper.createLikelyBranchWeights());
4571 } else {
4572 TrapBB = createBasicBlock("trap");
4573 Builder.CreateCondBr(Checked, Cont, TrapBB,
4574 MDHelper.createLikelyBranchWeights());
4575 EmitBlock(TrapBB);
4576
4577 ApplyDebugLocation applyTrapDI(*this, TrapLocation);
4578
4579 llvm::CallInst *TrapCall;
4580 if (CGM.getCodeGenOpts().SanitizeTrapLoop)
4581 TrapCall =
4582 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::looptrap));
4583 else
// ubsantrap carries the handler ID as an immediate so the crash encodes
// which check failed.
4584 TrapCall = Builder.CreateCall(
4585 CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
4586 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
4587
4588 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4589 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4590 CGM.getCodeGenOpts().TrapFuncName);
4591 TrapCall->addFnAttr(A);
4592 }
4593 if (NoMerge)
4594 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4595 TrapCall->setDoesNotReturn();
4596 TrapCall->setDoesNotThrow();
4597 Builder.CreateUnreachable();
4598 }
4599
4600 EmitBlock(Cont);
4601}
4602
// EmitTrapCall: emit a bare call to the given trap intrinsic, honoring
// -ftrap-function if set, and mark it NoMerge so distinct trap sites keep
// distinct crash addresses. The caller is responsible for any terminator.
4603llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
4604 llvm::CallInst *TrapCall =
4605 Builder.CreateCall(CGM.getIntrinsic(IntrID));
4606
4607 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4608 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4609 CGM.getCodeGenOpts().TrapFuncName);
4610 TrapCall->addFnAttr(A);
4611 }
4612
// NOTE(review): original line 4613 (likely a comment) is not visible here.
4614 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4615 return TrapCall;
4616}
4617
4619 LValueBaseInfo *BaseInfo,
4620 TBAAAccessInfo *TBAAInfo) {
4621 assert(E->getType()->isArrayType() &&
4622 "Array to pointer decay must have array source type!");
4623
4624 // Expressions of array type can't be bitfields or vector elements.
4625 LValue LV = EmitLValue(E);
4626 Address Addr = LV.getAddress();
4627
4628 // If the array type was an incomplete type, we need to make sure
4629 // the decay ends up being the right type.
4630 llvm::Type *NewTy = ConvertType(E->getType());
4631 Addr = Addr.withElementType(NewTy);
4632
4633 // Note that VLA pointers are always decayed, so we don't need to do
4634 // anything here.
4635 if (!E->getType()->isVariableArrayType()) {
4636 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4637 "Expected pointer to array");
4638
4639 if (getLangOpts().EmitLogicalPointer) {
4640 // Array-to-pointer decay for an SGEP is a no-op as we don't do any
4641 // logical indexing. See #179951 for some additional context.
4642 auto *SGEP =
4643 Builder.CreateStructuredGEP(NewTy, Addr.emitRawPointer(*this), {});
4644 Addr = Address(SGEP, NewTy, Addr.getAlignment(), Addr.isKnownNonNull());
4645 } else {
4646 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4647 }
4648 }
4649
4650 // The result of this decay conversion points to an array element within the
4651 // base lvalue. However, since TBAA currently does not support representing
4652 // accesses to elements of member arrays, we conservatively represent accesses
4653 // to the pointee object as if it had no any base lvalue specified.
4654 // TODO: Support TBAA for member arrays.
4656 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
4657 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
4658
4659 return Addr.withElementType(ConvertTypeForMem(EltType));
4660}
4661
4662/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4663/// array to pointer, return the array subexpression.
4664static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4665 // If this isn't just an array->pointer decay, bail out.
4666 const auto *CE = dyn_cast<CastExpr>(E);
4667 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4668 return nullptr;
4669
4670 // If this is a decay from variable width array, bail out.
4671 const Expr *SubExpr = CE->getSubExpr();
4672 if (SubExpr->getType()->isVariableArrayType())
4673 return nullptr;
4674
4675 return SubExpr;
4676}
4677
// Tail of the llvm::Value* overload of emitArraySubscriptGEP (the first
// signature line, original 4678, is not visible here). Emits a GEP for an
// array subscript, using a structured GEP under EmitLogicalPointer, a
// checked in-bounds GEP when inbounds, and a plain GEP otherwise.
4679 llvm::Type *elemType,
4680 llvm::Value *ptr,
4681 ArrayRef<llvm::Value*> indices,
4682 bool inbounds,
4683 bool signedIndices,
4684 SourceLocation loc,
4685 const llvm::Twine &name = "arrayidx") {
4686 if (inbounds && CGF.getLangOpts().EmitLogicalPointer)
4687 return CGF.Builder.CreateStructuredGEP(elemType, ptr, indices);
4688
4689 if (inbounds) {
// NOTE(review): one argument line of this call (original 4691) is not
// visible in this view.
4690 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
4692 name);
4693 } else {
4694 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
4695 }
4696}
4697
// Tail of the Address-returning overload of emitArraySubscriptGEP (its first
// signature lines, original 4698-4699, are not visible here). Same dispatch
// as the value overload, but carries alignment through to the result.
4700 llvm::Type *arrayType,
4701 llvm::Type *elementType, bool inbounds,
4702 bool signedIndices, SourceLocation loc,
4703 CharUnits align,
4704 const llvm::Twine &name = "arrayidx") {
4705 if (inbounds && CGF.getLangOpts().EmitLogicalPointer)
// Structured GEPs do not take the leading zero index, hence drop_front().
4706 return RawAddress(CGF.Builder.CreateStructuredGEP(arrayType,
4707 addr.emitRawPointer(CGF),
4708 indices.drop_front()),
4709 elementType, align);
4710
4711 if (inbounds) {
// NOTE(review): one argument line of this call (original 4713) is not
// visible in this view.
4712 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
4714 align, name);
4715 } else {
4716 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
4717 }
4718}
4719
// Tail of getFixedSizeElementType (its first signature line, original 4720,
// is not visible here). Strips off nested VLA layers and returns the
// innermost, fixed-size element type.
4721 const VariableArrayType *vla) {
4722 QualType eltType;
4723 do {
4724 eltType = vla->getElementType();
4725 } while ((vla = ctx.getAsVariableArrayType(eltType)));
4726 return eltType;
4727}
4728
// Body of the declaration-based hasBPFPreserveStaticOffset overload (its
// signature, original line 4729, is not visible here): true iff the decl
// carries BPF's preserve_static_offset attribute.
4730 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4731}
4732
4733static bool hasBPFPreserveStaticOffset(const Expr *E) {
4734 if (!E)
4735 return false;
4736 QualType PointeeType = E->getType()->getPointeeType();
4737 if (PointeeType.isNull())
4738 return false;
4739 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4740 return hasBPFPreserveStaticOffset(BaseDecl);
4741 return false;
4742}
4743
4744// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
// NOTE(review): the function's first signature line (original 4745) is not
// visible here. On non-BPF targets this is a no-op passthrough.
4746 Address &Addr) {
4747 if (!CGF.getTarget().getTriple().isBPF())
4748 return Addr;
4749
4750 llvm::Function *Fn =
4751 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4752 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
// Same element type and alignment; only the pointer is laundered through
// the intrinsic.
4753 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4754}
4755
4756/// Given an array base, check whether its member access belongs to a record
4757/// with preserve_access_index attribute or not.
4758static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
// Debug info is required for preserve-access-index lowering.
4759 if (!ArrayBase || !CGF.getDebugInfo())
4760 return false;
4761
4762 // Only support base as either a MemberExpr or DeclRefExpr.
4763 // DeclRefExpr to cover cases like:
4764 // struct s { int a; int b[10]; };
4765 // struct s *p;
4766 // p[1].a
4767 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4768 // p->b[5] is a MemberExpr example.
4769 const Expr *E = ArrayBase->IgnoreImpCasts();
4770 if (const auto *ME = dyn_cast<MemberExpr>(E))
4771 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4772
4773 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4774 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4775 if (!VarDef)
4776 return false;
4777
4778 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4779 if (!PtrT)
4780 return false;
4781
// NOTE(review): the continuation of this initializer (original line 4783)
// is not visible here; presumably it desugars/canonicalizes the pointee.
4782 const auto *PointeeT = PtrT->getPointeeType()
4784 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
// Attribute may have been added on a later redeclaration; check the most
// recent decl.
4785 return RecT->getDecl()
4786 ->getMostRecentDecl()
4787 ->hasAttr<BPFPreserveAccessIndexAttr>();
4788 return false;
4789 }
4790
4791 return false;
4792}
4793
// Tail of the QualType-based emitArraySubscriptGEP overload (its first
// signature lines, original 4794-4795, are not visible here). Computes the
// element alignment from the last index, then either emits a normal
// subscript GEP or, for BPF preserve-access-index bases, a
// preserve.array.access.index intrinsic carrying debug-info metadata.
4796 QualType eltType, bool inbounds,
4797 bool signedIndices, SourceLocation loc,
4798 QualType *arrayType = nullptr,
4799 const Expr *Base = nullptr,
4800 const llvm::Twine &name = "arrayidx") {
4801 // All the indices except that last must be zero.
4802#ifndef NDEBUG
4803 for (auto *idx : indices.drop_back())
4804 assert(isa<llvm::ConstantInt>(idx) &&
4805 cast<llvm::ConstantInt>(idx)->isZero());
4806#endif
4807
4808 // Determine the element size of the statically-sized base. This is
4809 // the thing that the indices are expressed in terms of.
4810 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4811 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4812 }
4813
4814 // We can use that to compute the best alignment of the element.
4815 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4816 CharUnits eltAlign =
4817 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4818
// NOTE(review): the guard on this call (original line 4819) is not visible
// here; presumably it checks hasBPFPreserveStaticOffset on the base.
4820 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4821
4822 llvm::Value *eltPtr;
4823 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
// NOTE(review): original lines 4825 and 4827 (the rest of this condition
// and one GEP argument) are not visible here; presumably the non-BPF path
// is taken when the index is non-constant or the base is not
// preserve-access-index.
4824 if (!LastIndex ||
4826 addr = emitArraySubscriptGEP(CGF, addr, indices,
4828 : nullptr,
4829 CGF.ConvertTypeForMem(eltType), inbounds,
4830 signedIndices, loc, eltAlign, name);
4831 return addr;
4832 } else {
4833 // Remember the original array subscript for bpf target
4834 unsigned idx = LastIndex->getZExtValue();
4835 llvm::DIType *DbgInfo = nullptr;
4836 if (arrayType)
4837 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4838 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4839 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4840 idx, DbgInfo);
4841 }
4842
4843 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4844}
4845
4846namespace {
4847
4848/// StructFieldAccess is a simple visitor class to grab the first l-value to
4849/// r-value cast Expr.
4850struct StructFieldAccess
4851 : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
4852 const Expr *VisitCastExpr(const CastExpr *E) {
4853 if (E->getCastKind() == CK_LValueToRValue)
4854 return E;
4855 return Visit(E->getSubExpr());
4856 }
4857 const Expr *VisitParenExpr(const ParenExpr *E) {
4858 return Visit(E->getSubExpr());
4859 }
4860};
4861
4862} // end anonymous namespace
4863
4864/// The offset of a field from the beginning of the record.
// NOTE(review): the function's first signature line (original 4865) is not
// visible here. Recursively searches RD (and nested record-typed fields)
// for Field, accumulating the bit offset into Offset; returns true on found.
4866 const FieldDecl *Field, int64_t &Offset) {
4867 ASTContext &Ctx = CGF.getContext();
4868 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4869 unsigned FieldNo = 0;
4870
4871 for (const FieldDecl *FD : RD->fields()) {
4872 if (FD == Field) {
4873 Offset += Layout.getFieldOffset(FieldNo);
4874 return true;
4875 }
4876
// Descend into nested records; on success add the nested record's own
// offset within this record.
4877 QualType Ty = FD->getType();
4878 if (Ty->isRecordType())
4879 if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
4880 Offset += Layout.getFieldOffset(FieldNo);
4881 return true;
4882 }
4883
// Union members all share field number 0 in the layout.
4884 if (!RD->isUnion())
4885 ++FieldNo;
4886 }
4887
4888 return false;
4889}
4890
4891/// Returns the relative offset difference between \p FD1 and \p FD2.
4892/// \code
4893/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4894/// \endcode
4895/// Both fields must be within the same struct.
4896static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4897 const FieldDecl *FD1,
4898 const FieldDecl *FD2) {
// NOTE(review): the initializers of these two locals (original lines 4900
// and 4902) are not visible here; presumably they fetch each field's
// outermost enclosing record.
4899 const RecordDecl *FD1OuterRec =
4901 const RecordDecl *FD2OuterRec =
4903
4904 if (FD1OuterRec != FD2OuterRec)
4905 // Fields must be within the same RecordDecl.
4906 return std::optional<int64_t>();
4907
4908 int64_t FD1Offset = 0;
4909 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4910 return std::optional<int64_t>();
4911
4912 int64_t FD2Offset = 0;
4913 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4914 return std::optional<int64_t>();
4915
4916 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4917}
4918
4919/// EmitCountedByBoundsChecking - If the array being accessed has a "counted_by"
4920/// attribute, generate bounds checking code. The "count" field is at the top
4921/// level of the struct or in an anonymous struct, that's also at the top level.
4922/// Future expansions may allow the "count" to reside at any place in the
4923/// struct, but the value of "counted_by" will be a "simple" path to the count,
4924/// i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4925/// similar to emit the correct GEP.
// NOTE(review): the function's first signature line (original 4926) is not
// visible in this view.
4927 const Expr *ArrayExpr, QualType ArrayType, Address ArrayInst,
4928 QualType IndexType, llvm::Value *IndexVal, bool Accessed,
4929 bool FlexibleArray) {
// Bail out unless the accessed member's type actually carries counted_by.
4930 const auto *ME = dyn_cast<MemberExpr>(ArrayExpr->IgnoreImpCasts());
4931 if (!ME || !ME->getMemberDecl()->getType()->isCountAttributedType())
4932 return;
4933
4934 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4935 getLangOpts().getStrictFlexArraysLevel();
4936 if (FlexibleArray &&
4937 !ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel))
4938 return;
4939
4940 const FieldDecl *FD = cast<FieldDecl>(ME->getMemberDecl());
4941 const FieldDecl *CountFD = FD->findCountedByField();
4942 if (!CountFD)
4943 return;
4944
4945 if (std::optional<int64_t> Diff =
4946 getOffsetDifferenceInBits(*this, CountFD, FD)) {
4947 if (!ArrayInst.isValid()) {
4948 // An invalid Address indicates we're checking a pointer array access.
4949 // Emit the checked L-Value here.
4950 LValue LV = EmitCheckedLValue(ArrayExpr, TCK_MemberAccess);
4951 ArrayInst = LV.getAddress();
4952 }
4953
4954 // FIXME: The 'static_cast' is necessary, otherwise the result turns into a
4955 // uint64_t, which messes things up if we have a negative offset difference.
4956 Diff = *Diff / static_cast<int64_t>(CGM.getContext().getCharWidth());
4957
4958 // Create a GEP with the byte offset between the counted object and the
4959 // count and use that to load the count value.
4960 ArrayInst = Builder.CreatePointerBitCastOrAddrSpaceCast(ArrayInst,
4961 Int8PtrTy, Int8Ty);
4962
4963 llvm::Type *BoundsType = ConvertType(CountFD->getType());
4964 llvm::Value *BoundsVal =
4965 Builder.CreateInBoundsGEP(Int8Ty, ArrayInst.emitRawPointer(*this),
4966 Builder.getInt32(*Diff), ".counted_by.gep");
4967 BoundsVal = Builder.CreateAlignedLoad(BoundsType, BoundsVal, getIntAlign(),
4968 ".counted_by.load");
4969
4970 // Now emit the bounds checking.
4971 EmitBoundsCheckImpl(ArrayExpr, ArrayType, IndexVal, IndexType, BoundsVal,
4972 CountFD->getType(), Accessed);
4973 }
4974}
4975
// Body of CodeGenFunction::EmitArraySubscriptExpr (the signature's first
// line, original 4976, is not visible in this view). Lowers E1[E2] into an
// lvalue, dispatching on the base's type: vector element, HLSL resource,
// VLA, ObjC interface, direct array decay, or plain pointer arithmetic.
4977 bool Accessed) {
4978 // The index must always be an integer, which is not an aggregate. Emit it
4979 // in lexical order (this complexity is, sadly, required by C++17).
4980 llvm::Value *IdxPre =
4981 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4982 bool SignedIndices = false;
// Deferred index emission: emits the index after the base when the index is
// the RHS, applies -fsanitize=array-bounds, and optionally promotes the
// index to the pointer width.
4983 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4984 auto *Idx = IdxPre;
4985 if (E->getLHS() != E->getIdx()) {
4986 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4987 Idx = EmitScalarExpr(E->getIdx());
4988 }
4989
4990 QualType IdxTy = E->getIdx()->getType();
4991 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4992 SignedIndices |= IdxSigned;
4993
4994 if (SanOpts.has(SanitizerKind::ArrayBounds))
4995 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4996
4997 // Extend or truncate the index type to 32 or 64-bits.
4998 if (Promote && Idx->getType() != IntPtrTy)
4999 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
5000
5001 return Idx;
5002 };
5003 IdxPre = nullptr;
5004
5005 // If the base is a vector type, then we are forming a vector element lvalue
5006 // with this subscript.
// NOTE(review): the second half of this condition (original line 5008) is
// not visible in this view.
5007 if (E->getBase()->getType()->isSubscriptableVectorType() &&
5009 // Emit the vector as an lvalue to get its address.
5010 LValue LHS = EmitLValue(E->getBase());
5011 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
5012 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
5013 return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
5014 LHS.getBaseInfo(), TBAAAccessInfo());
5015 }
5016
5017 // The HLSL runtime handles subscript expressions on global resource arrays
5018 // and objects with HLSL buffer layouts.
5019 if (getLangOpts().HLSL) {
5020 std::optional<LValue> LV;
// NOTE(review): the second half of this condition (original line 5022) is
// not visible in this view.
5021 if (E->getType()->isHLSLResourceRecord() ||
5023 LV = CGM.getHLSLRuntime().emitResourceArraySubscriptExpr(E, *this);
5024 } else if (E->getType().getAddressSpace() == LangAS::hlsl_constant) {
5025 LV = CGM.getHLSLRuntime().emitBufferArraySubscriptExpr(E, *this,
5026 EmitIdxAfterBase);
5027 }
5028 if (LV.has_value())
5029 return *LV;
5030 }
5031
5032 // All the other cases basically behave like simple offsetting.
5033
5034 // Handle the extvector case we ignored above.
// NOTE(review): the condition opening this branch (original line 5035) and
// the Addr initialization (original line 5038) are not visible in this view.
5036 LValue LV = EmitLValue(E->getBase());
5037 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
5039
5040 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
5041 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
5042 SignedIndices, E->getExprLoc());
5043 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
5044 CGM.getTBAAInfoForSubobject(LV, EltType));
5045 }
5046
5047 LValueBaseInfo EltBaseInfo;
5048 TBAAAccessInfo EltTBAAInfo;
// NOTE(review): the declaration of Addr (original line 5049) is not visible
// in this view.
5050 if (const VariableArrayType *vla =
5051 getContext().getAsVariableArrayType(E->getType())) {
5052 // The base must be a pointer, which is not an aggregate. Emit
5053 // it. It needs to be emitted first in case it's what captures
5054 // the VLA bounds.
5055 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
5056 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
5057
5058 // The element count here is the total number of non-VLA elements.
5059 llvm::Value *numElements = getVLASize(vla).NumElts;
5060
5061 // Effectively, the multiply by the VLA size is part of the GEP.
5062 // GEP indexes are signed, and scaling an index isn't permitted to
5063 // signed-overflow, so we use the same semantics for our explicit
5064 // multiply. We suppress this if overflow is not undefined behavior.
5065 if (getLangOpts().PointerOverflowDefined) {
5066 Idx = Builder.CreateMul(Idx, numElements);
5067 } else {
5068 Idx = Builder.CreateNSWMul(Idx, numElements);
5069 }
5070
5071 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
5072 !getLangOpts().PointerOverflowDefined,
5073 SignedIndices, E->getExprLoc());
5074
5075 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
5076 // Indexing over an interface, as in "NSString *P; P[4];"
5077
5078 // Emit the base pointer.
5079 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
5080 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
5081
5082 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
5083 llvm::Value *InterfaceSizeVal =
5084 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
5085
5086 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
5087
5088 // We don't necessarily build correct LLVM struct types for ObjC
5089 // interfaces, so we can't rely on GEP to do this scaling
5090 // correctly, so we need to cast to i8*. FIXME: is this actually
5091 // true? A lot of other things in the fragile ABI would break...
5092 llvm::Type *OrigBaseElemTy = Addr.getElementType();
5093
5094 // Do the GEP.
5095 CharUnits EltAlign =
5096 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
5097 llvm::Value *EltPtr =
5098 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
5099 ScaledIdx, false, SignedIndices, E->getExprLoc());
5100 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
5101 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
5102 // If this is A[i] where A is an array, the frontend will have decayed the
5103 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
5104 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
5105 // "gep x, i" here. Emit one "gep A, 0, i".
5106 assert(Array->getType()->isArrayType() &&
5107 "Array to pointer decay must have array source type!");
5108 LValue ArrayLV;
5109 // For simple multidimensional array indexing, set the 'accessed' flag for
5110 // better bounds-checking of the base expression.
5111 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
5112 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
5113 else
5114 ArrayLV = EmitLValue(Array);
5115 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
5116
5117 if (SanOpts.has(SanitizerKind::ArrayBounds))
5118 EmitCountedByBoundsChecking(Array, Array->getType(), ArrayLV.getAddress(),
5119 E->getIdx()->getType(), Idx, Accessed,
5120 /*FlexibleArray=*/true);
5121
5122 // Propagate the alignment from the array itself to the result.
5123 QualType arrayType = Array->getType();
// NOTE(review): the assignment target and callee of this call (original
// line 5124, presumably `Addr = emitArraySubscriptGEP(`) are not visible.
5125 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
5126 E->getType(), !getLangOpts().PointerOverflowDefined, SignedIndices,
5127 E->getExprLoc(), &arrayType, E->getBase());
5128 EltBaseInfo = ArrayLV.getBaseInfo();
5129 if (!CGM.getCodeGenOpts().NewStructPathTBAA) {
5130 // Since CodeGenTBAA::getTypeInfoHelper only handles array types for
5131 // new struct path TBAA, we must a use a plain access.
5132 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
5133 } else if (ArrayLV.getTBAAInfo().isMayAlias()) {
5134 EltTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5135 } else if (ArrayLV.getTBAAInfo().isIncomplete()) {
5136 // The array element is complete, even if the array is not.
5137 EltTBAAInfo = CGM.getTBAAAccessInfo(E->getType());
5138 } else {
5139 // The TBAA access info from the array (base) lvalue is ordinary. We will
5140 // adapt it to create access info for the element.
5141 EltTBAAInfo = ArrayLV.getTBAAInfo();
5142
5143 // We retain the TBAA struct path (BaseType and Offset members) from the
5144 // array. In the TBAA representation, we map any array access to the
5145 // element at index 0, as the index is generally a runtime value. This
5146 // element has the same offset in the base type as the array itself.
5147 // If the array lvalue had no base type, there is no point trying to
5148 // generate one, since an array itself is not a valid base type.
5149
5150 // We also retain the access type from the base lvalue, but the access
5151 // size must be updated to the size of an individual element.
// NOTE(review): the right-hand side of this assignment (original line
// 5153) is not visible in this view.
5152 EltTBAAInfo.Size =
5154 }
5155 } else {
5156 // The base must be a pointer; emit it with an estimate of its alignment.
5157 Address BaseAddr =
5158 EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
5159 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
5160 QualType ptrType = E->getBase()->getType();
5161 Addr = emitArraySubscriptGEP(*this, BaseAddr, Idx, E->getType(),
5162 !getLangOpts().PointerOverflowDefined,
5163 SignedIndices, E->getExprLoc(), &ptrType,
5164 E->getBase());
5165
5166 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
5167 StructFieldAccess Visitor;
5168 const Expr *Base = Visitor.Visit(E->getBase());
5169
// NOTE(review): the first line of this call (original line 5172,
// presumably EmitCountedByBoundsChecking with an invalid Address) is not
// visible in this view.
5170 if (const auto *CE = dyn_cast_if_present<CastExpr>(Base);
5171 CE && CE->getCastKind() == CK_LValueToRValue)
5173 E->getIdx()->getType(), Idx, Accessed,
5174 /*FlexibleArray=*/false);
5175 }
5176 }
5177
5178 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
5179
// NOTE(review): the body of this ObjC-GC branch (original lines 5182-5183)
// is not visible in this view.
5180 if (getLangOpts().ObjC &&
5181 getLangOpts().getGC() != LangOptions::NonGC) {
5184 }
5185 return LV;
5186}
5187
// Body of EmitMatrixIndexExpr (its signature, original line 5188, is not
// visible here): emit the index expression and cast it to the pointer-width
// integer type, preserving signedness.
5189 llvm::Value *Idx = EmitScalarExpr(E);
5190 if (Idx->getType() == IntPtrTy)
5191 return Idx;
5192 bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
5193 return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
5194}
5195
// Tail of EmitMatrixSingleSubscriptExpr (the signature's first line,
// original 5196, is not visible here): form a matrix-row lvalue M[row].
5197 const MatrixSingleSubscriptExpr *E) {
5198 LValue Base = EmitLValue(E->getBase());
5199 llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
5200
// NOTE(review): the second half of this HLSL condition (original line 5203)
// is not visible in this view.
5201 RawAddress MatAddr = Base.getAddress();
5202 if (getLangOpts().HLSL &&
5204 MatAddr = CGM.getHLSLRuntime().createBufferMatrixTempAddress(
5205 Base, E->getExprLoc(), *this);
5206
5207 return LValue::MakeMatrixRow(MaybeConvertMatrixAddress(MatAddr, *this),
5208 RowIdx, E->getBase()->getType(),
5209 Base.getBaseInfo(), TBAAAccessInfo());
5210}
5211
// Body of EmitMatrixSubscriptExpr (its signature, original line 5212, is not
// visible here): lower M[row][col] to a single flat element index computed
// by MatrixBuilder and return a matrix-element lvalue.
5213 assert(
5214 !E->isIncomplete() &&
5215 "incomplete matrix subscript expressions should be rejected during Sema");
5216 LValue Base = EmitLValue(E->getBase());
5217
5218 // Extend or truncate the index type to 32 or 64-bits if needed.
5219 llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
5220 llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());
5221 llvm::MatrixBuilder MB(Builder);
5222 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
5223 unsigned NumCols = MatrixTy->getNumColumns();
5224 unsigned NumRows = MatrixTy->getNumRows();
// NOTE(review): the enumerator compared against here (original line 5226)
// is not visible in this view; presumably the row-major layout kind.
5225 bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
5227 llvm::Value *FinalIdx =
5228 MB.CreateIndex(RowIdx, ColIdx, NumRows, NumCols, IsMatrixRowMajor);
5229
5230 return LValue::MakeMatrixElt(
5231 MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
5232 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
5233}
5234
// Tail of emitOMPArraySectionBase (the signature's first line, original
// 5235, is not visible here): compute the base address for an OpenMP array
// section, handling nested array sections, array-typed bases (decay), and
// pointer bases (load then offset).
5236 LValueBaseInfo &BaseInfo,
5237 TBAAAccessInfo &TBAAInfo,
5238 QualType BaseTy, QualType ElTy,
5239 bool IsLowerBound) {
5240 LValue BaseLVal;
5241 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
5242 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
5243 if (BaseTy->isArrayType()) {
5244 Address Addr = BaseLVal.getAddress();
5245 BaseInfo = BaseLVal.getBaseInfo();
5246
5247 // If the array type was an incomplete type, we need to make sure
5248 // the decay ends up being the right type.
5249 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
5250 Addr = Addr.withElementType(NewTy);
5251
5252 // Note that VLA pointers are always decayed, so we don't need to do
5253 // anything here.
5254 if (!BaseTy->isVariableArrayType()) {
5255 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
5256 "Expected pointer to array");
5257 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
5258 }
5259
5260 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
5261 }
// Pointer base: load the pointer value and merge alignment/TBAA info from
// the element type.
5262 LValueBaseInfo TypeBaseInfo;
5263 TBAAAccessInfo TypeTBAAInfo;
5264 CharUnits Align =
5265 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
5266 BaseInfo.mergeForCast(TypeBaseInfo);
5267 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
5268 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
5269 CGF.ConvertTypeForMem(ElTy), Align);
5270 }
5271 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
5272}
5273
// Body of CodeGenFunction::EmitArraySectionExpr (the signature's first line,
// original 5274, is not visible in this view). Lowers an OpenMP array
// section base[lb:len] to the lvalue of either its lower-bound element
// (IsLowerBound) or its last element (lb + len - 1), constant-folding the
// index where possible.
5275 bool IsLowerBound) {
5276
5277 assert(!E->isOpenACCArraySection() &&
5278 "OpenACC Array section codegen not implemented");
5279
// NOTE(review): the declaration/initializer of BaseTy (original line 5280)
// is not visible in this view.
5281 QualType ResultExprTy;
5282 if (auto *AT = getContext().getAsArrayType(BaseTy))
5283 ResultExprTy = AT->getElementType();
5284 else
5285 ResultExprTy = BaseTy->getPointeeType();
5286 llvm::Value *Idx = nullptr;
5287 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
5288 // Requesting lower bound or upper bound, but without provided length and
5289 // without ':' symbol for the default length -> length = 1.
5290 // Idx = LowerBound ?: 0;
5291 if (auto *LowerBound = E->getLowerBound()) {
5292 Idx = Builder.CreateIntCast(
5293 EmitScalarExpr(LowerBound), IntPtrTy,
5294 LowerBound->getType()->hasSignedIntegerRepresentation());
5295 } else
5296 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
5297 } else {
5298 // Try to emit length or lower bound as constant. If this is possible, 1
5299 // is subtracted from constant length or lower bound. Otherwise, emit LLVM
5300 // IR (LB + Len) - 1.
5301 auto &C = CGM.getContext();
5302 auto *Length = E->getLength();
5303 llvm::APSInt ConstLength;
5304 if (Length) {
5305 // Idx = LowerBound + Length - 1;
5306 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
5307 ConstLength = CL->zextOrTrunc(PointerWidthInBits);
5308 Length = nullptr;
5309 }
5310 auto *LowerBound = E->getLowerBound();
5311 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
5312 if (LowerBound) {
5313 if (std::optional<llvm::APSInt> LB =
5314 LowerBound->getIntegerConstantExpr(C)) {
5315 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
5316 LowerBound = nullptr;
5317 }
5318 }
// Fold the "- 1" into whichever operand is a compile-time constant so at
// most one runtime subtraction is emitted.
5319 if (!Length)
5320 --ConstLength;
5321 else if (!LowerBound)
5322 --ConstLowerBound;
5323
5324 if (Length || LowerBound) {
5325 auto *LowerBoundVal =
5326 LowerBound
5327 ? Builder.CreateIntCast(
5328 EmitScalarExpr(LowerBound), IntPtrTy,
5329 LowerBound->getType()->hasSignedIntegerRepresentation())
5330 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
5331 auto *LengthVal =
5332 Length
5333 ? Builder.CreateIntCast(
5334 EmitScalarExpr(Length), IntPtrTy,
5335 Length->getType()->hasSignedIntegerRepresentation())
5336 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
5337 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
5338 /*HasNUW=*/false,
5339 !getLangOpts().PointerOverflowDefined);
5340 if (Length && LowerBound) {
5341 Idx = Builder.CreateSub(
5342 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
5343 /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
5344 }
5345 } else
5346 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
5347 } else {
5348 // Idx = ArraySize - 1;
// NOTE(review): the pointee branch of this conditional (original line
// 5350) is not visible in this view.
5349 QualType ArrayTy = BaseTy->isPointerType()
5351 : BaseTy;
5352 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
5353 Length = VAT->getSizeExpr();
5354 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
5355 ConstLength = *L;
5356 Length = nullptr;
5357 }
5358 } else {
5359 auto *CAT = C.getAsConstantArrayType(ArrayTy);
5360 assert(CAT && "unexpected type for array initializer");
5361 ConstLength = CAT->getSize();
5362 }
5363 if (Length) {
5364 auto *LengthVal = Builder.CreateIntCast(
5365 EmitScalarExpr(Length), IntPtrTy,
5366 Length->getType()->hasSignedIntegerRepresentation());
5367 Idx = Builder.CreateSub(
5368 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
5369 /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
5370 } else {
5371 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
5372 --ConstLength;
5373 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
5374 }
5375 }
5376 }
5377 assert(Idx);
5378
5379 Address EltPtr = Address::invalid();
5380 LValueBaseInfo BaseInfo;
5381 TBAAAccessInfo TBAAInfo;
5382 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
5383 // The base must be a pointer, which is not an aggregate. Emit
5384 // it. It needs to be emitted first in case it's what captures
5385 // the VLA bounds.
5386 Address Base =
5387 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
5388 BaseTy, VLA->getElementType(), IsLowerBound);
5389 // The element count here is the total number of non-VLA elements.
5390 llvm::Value *NumElements = getVLASize(VLA).NumElts;
5391
5392 // Effectively, the multiply by the VLA size is part of the GEP.
5393 // GEP indexes are signed, and scaling an index isn't permitted to
5394 // signed-overflow, so we use the same semantics for our explicit
5395 // multiply. We suppress this if overflow is not undefined behavior.
5396 if (getLangOpts().PointerOverflowDefined)
5397 Idx = Builder.CreateMul(Idx, NumElements);
5398 else
5399 Idx = Builder.CreateNSWMul(Idx, NumElements);
5400 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
5401 !getLangOpts().PointerOverflowDefined,
5402 /*signedIndices=*/false, E->getExprLoc());
5403 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
5404 // If this is A[i] where A is an array, the frontend will have decayed the
5405 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
5406 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
5407 // "gep x, i" here. Emit one "gep A, 0, i".
5408 assert(Array->getType()->isArrayType() &&
5409 "Array to pointer decay must have array source type!");
5410 LValue ArrayLV;
5411 // For simple multidimensional array indexing, set the 'accessed' flag for
5412 // better bounds-checking of the base expression.
5413 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
5414 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
5415 else
5416 ArrayLV = EmitLValue(Array);
5417
5418 // Propagate the alignment from the array itself to the result.
5419 EltPtr = emitArraySubscriptGEP(
5420 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
5421 ResultExprTy, !getLangOpts().PointerOverflowDefined,
5422 /*signedIndices=*/false, E->getExprLoc());
5423 BaseInfo = ArrayLV.getBaseInfo();
5424 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
5425 } else {
5426 Address Base =
5427 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
5428 ResultExprTy, IsLowerBound);
5429 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
5430 !getLangOpts().PointerOverflowDefined,
5431 /*signedIndices=*/false, E->getExprLoc());
5432 }
5433
5434 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
5435}
5436
// Emits an lvalue for an ext-vector element access (swizzle), e.g. v.xyzw.
// NOTE(review): this listing extraction dropped the function signature
// (original lines 5437-5438) and several interior lines (5471, 5478, 5491,
// 5519, 5527 are absent from the embedded numbering). Code left byte-identical;
// comments only. Restore the dropped lines from upstream before compiling.
5439 // Emit the base vector as an l-value.
5440 LValue Base;
5441
5442 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
5443 if (E->isArrow()) {
5444 // If it is a pointer to a vector, emit the address and form an lvalue with
5445 // it.
5446 LValueBaseInfo BaseInfo;
5447 TBAAAccessInfo TBAAInfo;
5448 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
5449 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
5450 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
5451 Base.getQuals().removeObjCGCAttr();
5452 } else if (E->getBase()->isGLValue()) {
5453 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
5454 // emit the base as an lvalue.
5455 assert(E->getBase()->getType()->isVectorType());
5456 Base = EmitLValue(E->getBase());
5457 } else {
5458 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
5459 assert(E->getBase()->getType()->isVectorType() &&
5460 "Result must be a vector");
5461 llvm::Value *Vec = EmitScalarExpr(E->getBase());
5462
5463 // Store the vector to memory (because LValue wants an address).
5464 Address VecMem = CreateMemTemp(E->getBase()->getType());
5465 // need to zero extend an hlsl boolean vector to store it back to memory
5466 QualType Ty = E->getBase()->getType();
5467 llvm::Type *LTy = convertTypeForLoadStore(Ty, Vec->getType());
5468 if (LTy->getScalarSizeInBits() > Vec->getType()->getScalarSizeInBits())
5469 Vec = Builder.CreateZExt(Vec, LTy);
5470 Builder.CreateStore(Vec, VecMem);
// NOTE(review): original line 5471 (presumably the MakeAddrLValue of VecMem
// into Base) was dropped by the extraction — confirm against upstream.
5472 }
5473
5474 QualType type =
5475 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
5476
5477 // Encode the element access list into a vector of unsigned indices.
// NOTE(review): original line 5478 (the declaration of 'Indices', presumably
// a SmallVector<uint32_t>) was dropped by the extraction.
5479 E->getEncodedElementAccess(Indices);
5480
5481 if (Base.isSimple()) {
5482 llvm::Constant *CV =
5483 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
5484 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
5485 Base.getBaseInfo(), TBAAAccessInfo());
5486 }
5487
5488 if (Base.isMatrixRow()) {
5489 if (auto *RowIdx =
5490 llvm::dyn_cast<llvm::ConstantInt>(Base.getMatrixRowIdx())) {
// NOTE(review): original line 5491 (presumably the 'MatIndices' vector
// declaration used below) was dropped by the extraction.
5492 QualType MatTy = Base.getType();
5493 const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();
5494 unsigned NumCols = Indices.size();
5495 unsigned NumRows = MT->getNumRows();
5496 unsigned Row = RowIdx->getZExtValue();
5497 QualType VecQT = E->getBase()->getType();
5498 if (NumCols != MT->getNumColumns()) {
5499 const auto *EVT = VecQT->getAs<ExtVectorType>();
5500 QualType ElemQT = EVT->getElementType();
5501 VecQT = getContext().getExtVectorType(ElemQT, NumCols);
5502 }
// Map each accessed column to a linear (column-major) element index:
// Col * NumRows + Row.
5503 for (unsigned C = 0; C < NumCols; ++C) {
5504 unsigned Col = Indices[C];
5505 unsigned Linear = Col * NumRows + Row;
5506 MatIndices.push_back(llvm::ConstantInt::get(Int32Ty, Linear));
5507 }
5508
5509 llvm::Constant *ConstIdxs = llvm::ConstantVector::get(MatIndices);
5510 return LValue::MakeExtVectorElt(Base.getMatrixAddress(), ConstIdxs, VecQT,
5511 Base.getBaseInfo(), TBAAAccessInfo());
5512 }
5513 llvm::Constant *Cols =
5514 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
5515 // Note: intentionally not using E.getType() so we can reuse isMatrixRow()
5516 // implementations in EmitLoadOfLValue & EmitStoreThroughLValue and don't
5517 // need the LValue to have its own number of rows and columns when the
5518 // type is a vector.
// NOTE(review): original line 5519 (the 'return LValue::...' opener for the
// call continued below) was dropped by the extraction.
5520 Base.getMatrixAddress(), Base.getMatrixRowIdx(), Cols, Base.getType(),
5521 Base.getBaseInfo(), TBAAAccessInfo());
5522 }
5523
5524 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
5525
5526 llvm::Constant *BaseElts = Base.getExtVectorElts();
// NOTE(review): original line 5527 (presumably the 'CElts' vector declaration
// used below) was dropped by the extraction.
5528
5529 for (unsigned Index : Indices)
5530 CElts.push_back(BaseElts->getAggregateElement(Index));
5531 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
5532 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
5533 Base.getBaseInfo(), TBAAAccessInfo());
5534}
5535
// Walks through any chain of MemberExprs (stripping parens at each step) to
// the outermost non-member base, then asks the ASTContext whether that base
// is a sentinel null expression.
// NOTE(review): the function signature (original line 5536) was dropped by
// the listing extraction; restore from upstream before compiling.
5537 const Expr *UnderlyingBaseExpr = E->IgnoreParens();
5538 while (auto *BaseMemberExpr = dyn_cast<MemberExpr>(UnderlyingBaseExpr))
5539 UnderlyingBaseExpr = BaseMemberExpr->getBase()->IgnoreParens();
5540 return getContext().isSentinelNullExpr(UnderlyingBaseExpr);
5541}
5542
// Emits an lvalue for a MemberExpr (s.x / s->x), handling DeclRef folding,
// HLSL buffer/resource members, sanitizer-checked base emission, field
// access, and member-function references.
// NOTE(review): the listing extraction dropped the function signature
// (original line 5543) and interior lines 5545, 5551, 5554, 5567, 5581, 5590.
// Code left byte-identical; comments only.
5544 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
5546 return EmitDeclRefLValue(DRE);
5547 }
5548
5549 if (getLangOpts().HLSL) {
5550 QualType QT = E->getType();
// NOTE(review): the condition guarding this buffer-member path (original
// line 5551) was dropped by the extraction.
5552 return CGM.getHLSLRuntime().emitBufferMemberExpr(*this, E);
5553
5555 std::optional<LValue> LV;
5556 LV = CGM.getHLSLRuntime().emitResourceMemberExpr(*this, E);
5557 if (LV.has_value())
5558 return *LV;
5559 }
5560 }
5561
5562 Expr *BaseExpr = E->getBase();
5563 // Check whether the underlying base pointer is a constant null.
5564 // If so, we do not set inbounds flag for GEP to avoid breaking some
5565 // old-style offsetof idioms.
5566 bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
5568 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
5569 LValue BaseLV;
5570 if (E->isArrow()) {
5571 LValueBaseInfo BaseInfo;
5572 TBAAAccessInfo TBAAInfo;
5573 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
5574 QualType PtrTy = BaseExpr->getType()->getPointeeType();
5575 SanitizerSet SkippedChecks;
5576 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
// Skip sanitizer checks that are statically known to pass: 'this' is
// aligned, and 'this'/DeclRef bases cannot be null here.
5577 if (IsBaseCXXThis)
5578 SkippedChecks.set(SanitizerKind::Alignment, true);
5579 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
5580 SkippedChecks.set(SanitizerKind::Null, true);
5582 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
5583 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
5584 } else
5585 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
5586
5587 NamedDecl *ND = E->getMemberDecl();
5588 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
5589 LValue LV = EmitLValueForField(BaseLV, Field, IsInBounds);
5591 if (getLangOpts().OpenMP) {
5592 // If the member was explicitly marked as nontemporal, mark it as
5593 // nontemporal. If the base lvalue is marked as nontemporal, mark access
5594 // to children as nontemporal too.
5595 if ((IsWrappedCXXThis(BaseExpr) &&
5596 CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
5597 BaseLV.isNontemporal())
5598 LV.setNontemporal(/*Value=*/true);
5599 }
5600 return LV;
5601 }
5602
5603 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
5604 return EmitFunctionDeclLValue(*this, E, FD);
5605
5606 llvm_unreachable("Unhandled member declaration!");
5607}
5608
5609/// Given that we are currently emitting a lambda, emit an l-value for
5610/// one of its members.
5611///
// NOTE(review): the listing extraction dropped the signature opener (original
// line 5612) and interior lines 5629, 5632, 5639, 5642. Code left
// byte-identical; comments only.
5613 llvm::Value *ThisValue) {
// With an explicit object parameter (C++23 deducing-this), the lambda object
// is the first parameter rather than an implicit 'this'.
5614 bool HasExplicitObjectParameter = false;
5615 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
5616 if (MD) {
5617 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
5618 assert(MD->getParent()->isLambda());
5619 assert(MD->getParent() == Field->getParent());
5620 }
5621 LValue LambdaLV;
5622 if (HasExplicitObjectParameter) {
5623 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
5624 auto It = LocalDeclMap.find(D);
5625 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
5626 Address AddrOfExplicitObject = It->getSecond();
5627 if (D->getType()->isReferenceType())
5628 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
5630 else
5631 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
5633
5634 // Make sure we have an lvalue to the lambda itself and not a derived class.
5635 auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
5636 auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
5637 if (ThisTy != LambdaTy) {
// Walk the recorded derived-to-base cast path to reach the lambda class.
5638 const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
5640 LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
5641 BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
5643 LambdaLV = MakeAddrLValue(Base, T);
5644 }
5645 } else {
5646 CanQualType LambdaTagType =
5647 getContext().getCanonicalTagType(Field->getParent());
5648 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
5649 }
5650 return EmitLValueForField(LambdaLV, Field);
5651}
5652
// Convenience overload: uses the current function's captured 'this' pointer
// (CXXABIThisValue) as the lambda object.
// NOTE(review): the signature (original line 5653) was dropped by the
// listing extraction.
5654 return EmitLValueForLambdaField(Field, CXXABIThisValue);
5655}
5656
5657/// Get the field index in the debug info. The debug info structure/union
5658/// will ignore the unnamed bitfields.
// NOTE(review): the first signature line (original 5659) was dropped by the
// listing extraction; only the second parameter line survives below.
5660 unsigned FieldIndex) {
5661 unsigned I = 0, Skipped = 0;
5662
// Count how many unnamed bit-fields precede FieldIndex; they are omitted
// from the debug-info layout, so the debug index is shifted down by that
// amount.
5663 for (auto *F : Rec->getDefinition()->fields()) {
5664 if (I == FieldIndex)
5665 break;
5666 if (F->isUnnamedBitField())
5667 Skipped++;
5668 I++;
5669 }
5670
5671 return FieldIndex - Skipped;
5672}
5673
5674/// Get the address of a zero-sized field within a record. The resulting
5675/// address doesn't necessarily have the right type.
// NOTE(review): the listing extraction dropped the signature opener (original
// line 5676) and line 5679 (presumably the CharUnits 'Offset' computation
// whose argument list continues below).
5677 const FieldDecl *Field,
5678 bool IsInBounds) {
5680 CGF.getContext().getFieldOffset(Field));
5681 if (Offset.isZero())
5682 return Base;
// Byte-offset GEP from the record start; inbounds only when the caller has
// established the base pointer is valid for it.
5683 Base = Base.withElementType(CGF.Int8Ty);
5684 if (!IsInBounds)
5685 return CGF.Builder.CreateConstByteGEP(Base, Offset);
5686 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
5687}
5688
5689/// Drill down to the storage of a field without walking into reference types,
5690/// and without respect for pointer field protection.
5691///
5692/// The resulting address doesn't necessarily have the right type.
// NOTE(review): the listing extraction dropped the signature opener (original
// line 5693) and line 5704 (the initializer of 'StructType').
5694 const FieldDecl *field,
5695 bool IsInBounds) {
// Zero-sized fields have no storage of their own; compute their address
// from the record layout offset instead of an LLVM struct index.
5696 if (isEmptyFieldForLayout(CGF.getContext(), field))
5697 return emitAddrOfZeroSizeField(CGF, base, field, IsInBounds);
5698
5699 const RecordDecl *rec = field->getParent();
5700
5701 unsigned idx =
5702 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5703 llvm::Type *StructType =
5705
5706 if (CGF.getLangOpts().EmitLogicalPointer)
5707 return RawAddress(
5708 CGF.Builder.CreateStructuredGEP(StructType, base.emitRawPointer(CGF),
5709 {CGF.Builder.getSize(idx)}),
5710 base.getElementType(), base.getAlignment());
5711
// Plain (non-inbounds) GEP when pointer overflow is defined or the base may
// be a constant null (old-style offsetof idioms).
5712 if (!IsInBounds)
5713 return CGF.Builder.CreateConstGEP2_32(base, 0, idx, field->getName());
5714
5715 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
5716}
5717
5718/// Drill down to the storage of a field without walking into reference types,
5719/// wrapping the address in an llvm.protected.field.ptr intrinsic for the
5720/// pointer field protection feature if necessary.
5721///
5722/// The resulting address doesn't necessarily have the right type.
// NOTE(review): the signature opener (original line 5723) was dropped by the
// listing extraction.
5724 const FieldDecl *field, bool IsInBounds) {
5725 Address Addr = emitRawAddrOfFieldStorage(CGF, base, field, IsInBounds);
5726
// Only PFP-protected fields need the intrinsic wrapper; everything else
// uses the raw field address directly.
5727 if (!CGF.getContext().isPFPField(field))
5728 return Addr;
5729
5730 return CGF.EmitAddressOfPFPField(base, Addr, field);
5731}
5732
// Emits a debug-info-preserving struct field access (used for BPF
// CO-RE style relocatable field accesses) instead of a plain GEP.
// NOTE(review): the listing extraction dropped the signature opener (original
// line 5733) and line 5742 (the 'return CGF.Builder.CreatePreserve...' opener
// whose argument list continues below).
5734 Address addr, const FieldDecl *field) {
5735 const RecordDecl *rec = field->getParent();
5736 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
5737 base.getType(), rec->getLocation());
5738
5739 unsigned idx =
5740 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5741
5743 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
5744}
5745
5746static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
5747 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
5748 if (!RD)
5749 return false;
5750
5751 if (RD->isDynamicClass())
5752 return true;
5753
5754 for (const auto &Base : RD->bases())
5755 if (hasAnyVptr(Base.getType(), Context))
5756 return true;
5757
5758 for (const FieldDecl *Field : RD->fields())
5759 if (hasAnyVptr(Field->getType(), Context))
5760 return true;
5761
5762 return false;
5763}
5764
// Emits an lvalue for a record field access: handles bit-fields (including
// AAPCS volatile bit-field storage), TBAA metadata for the member access,
// invariant.group handling for dynamic classes under strict vtable pointers,
// union vs. struct addressing, reference-typed fields, and field annotations.
// NOTE(review): the listing extraction dropped the signature opener (original
// line 5765) and interior lines 5777-5778, 5782-5783, 5847, 5856, 5860, 5884,
// 5932-5933. Code left byte-identical; comments only.
5766 bool IsInBounds) {
5767 LValueBaseInfo BaseInfo = base.getBaseInfo();
5768
5769 if (field->isBitField()) {
5770 const CGRecordLayout &RL =
5771 CGM.getTypes().getCGRecordLayout(field->getParent());
5772 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
// AAPCS volatile bit-field rule: access through the declared container
// width when the target/codegen options ask for it. The trailing condition
// (original lines 5777-5778) was dropped by the extraction.
5773 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
5774 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
5775 Info.VolatileStorageSize != 0 &&
5776 field->getType()
5779 Address Addr = base.getAddress();
5780 unsigned Idx = RL.getLLVMFieldNo(field);
5781 const RecordDecl *rec = field->getParent();
5784 if (!UseVolatile) {
5785 if (!IsInPreservedAIRegion &&
5786 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5787 if (Idx != 0) {
5788 // For structs, we GEP to the field that the record layout suggests.
5789 if (!IsInBounds)
5790 Addr = Builder.CreateConstGEP2_32(Addr, 0, Idx, field->getName());
5791 else
5792 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
5793 }
5794 } else {
// BPF preserve-access-index path: keep the original field index for CO-RE.
5795 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
5796 getContext().getCanonicalTagType(rec), rec->getLocation());
5797 Addr = Builder.CreatePreserveStructAccessIndex(
5798 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
5799 DbgInfo);
5800 }
5801 }
5802 const unsigned SS =
5803 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
5804 // Get the access type.
5805 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
5806 Addr = Addr.withElementType(FieldIntTy);
5807 if (UseVolatile) {
5808 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
5809 if (VolatileOffset)
5810 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
5811 }
5812
5813 QualType fieldType =
5814 field->getType().withCVRQualifiers(base.getVRQualifiers());
5815 // TODO: Support TBAA for bit fields.
5816 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
5817 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
5818 TBAAAccessInfo());
5819 }
5820
5821 // Fields of may-alias structures are may-alias themselves.
5822 // FIXME: this should get propagated down through anonymous structs
5823 // and unions.
5824 QualType FieldType = field->getType();
5825 const RecordDecl *rec = field->getParent();
5826 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
5827 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
5828 TBAAAccessInfo FieldTBAAInfo;
5829 if (base.getTBAAInfo().isMayAlias() ||
5830 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
5831 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5832 } else if (rec->isUnion()) {
5833 // TODO: Support TBAA for unions.
5834 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5835 } else {
5836 // If no base type been assigned for the base access, then try to generate
5837 // one for this base lvalue.
5838 FieldTBAAInfo = base.getTBAAInfo();
5839 if (!FieldTBAAInfo.BaseType) {
5840 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
5841 assert(!FieldTBAAInfo.Offset &&
5842 "Nonzero offset for an access with no base type!");
5843 }
5844
5845 // Adjust offset to be relative to the base type.
// NOTE(review): the initializer of 'Layout' (original line 5847) was
// dropped by the extraction.
5846 const ASTRecordLayout &Layout =
5848 unsigned CharWidth = getContext().getCharWidth();
5849 if (FieldTBAAInfo.BaseType)
5850 FieldTBAAInfo.Offset +=
5851 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
5852
5853 // Update the final access type and size.
5854 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
5855 FieldTBAAInfo.Size =
5857 }
5858
5859 Address addr = base.getAddress();
// NOTE(review): the condition guarding this wrap (original line 5860) was
// dropped by the extraction.
5861 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
5862 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
5863 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5864 ClassDef->isDynamicClass()) {
5865 // Getting to any field of dynamic object requires stripping dynamic
5866 // information provided by invariant.group. This is because accessing
5867 // fields may leak the real address of dynamic object, which could result
5868 // in miscompilation when leaked pointer would be compared.
5869 auto *stripped =
5870 Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
5871 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
5872 }
5873 }
5874
5875 unsigned RecordCVR = base.getVRQualifiers();
5876 if (rec->isUnion()) {
5877 // For unions, there is no pointer adjustment.
5878 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5879 hasAnyVptr(FieldType, getContext()))
5880 // Because unions can easily skip invariant.barriers, we need to add
5881 // a barrier every time CXXRecord field with vptr is referenced.
5882 addr = Builder.CreateLaunderInvariantGroup(addr);
5883
// NOTE(review): the first half of this condition (original line 5884) was
// dropped by the extraction.
5885 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5886 // Remember the original union field index
5887 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
5888 rec->getLocation());
5889 addr =
5890 Address(Builder.CreatePreserveUnionAccessIndex(
5891 addr.emitRawPointer(*this),
5892 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
5893 addr.getElementType(), addr.getAlignment());
5894 }
5895
5896 if (FieldType->isReferenceType())
5897 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5898 } else {
5899 if (!IsInPreservedAIRegion &&
5900 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
5901 // For structs, we GEP to the field that the record layout suggests.
5902 addr = emitAddrOfFieldStorage(*this, addr, field, IsInBounds);
5903 else
5904 // Remember the original struct field index
5905 addr = emitPreserveStructAccess(*this, base, addr, field);
5906 }
5907
5908 // If this is a reference field, load the reference right now.
5909 if (FieldType->isReferenceType()) {
5910 LValue RefLVal =
5911 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5912 if (RecordCVR & Qualifiers::Volatile)
5913 RefLVal.getQuals().addVolatile();
5914 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
5915
5916 // Qualifiers on the struct don't apply to the referencee.
5917 RecordCVR = 0;
5918 FieldType = FieldType->getPointeeType();
5919 }
5920
5921 // Make sure that the address is pointing to the right type. This is critical
5922 // for both unions and structs.
5923 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5924
5925 if (field->hasAttr<AnnotateAttr>())
5926 addr = EmitFieldAnnotations(field, addr);
5927
5928 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5929 LV.getQuals().addCVRQualifiers(RecordCVR);
5930
5931 // __weak attribute on a field is ignored.
5934
5935 return LV;
5936}
5937
// Emits the storage address of a reference-typed field for initialization:
// unlike EmitLValueForField, it must NOT load through the reference, since
// the reference itself is what is being initialized. Non-reference fields
// defer to EmitLValueForField.
// NOTE(review): the listing extraction dropped the signature opener (original
// line 5939) and line 5946 (the 'Address V = ...' opener whose argument list
// continues below). Code left byte-identical; comments only.
5938LValue
5940 const FieldDecl *Field) {
5941 QualType FieldType = Field->getType();
5942
5943 if (!FieldType->isReferenceType())
5944 return EmitLValueForField(Base, Field);
5945
5947 *this, Base.getAddress(), Field,
5948 /*IsInBounds=*/!getLangOpts().PointerOverflowDefined);
5949
5950 // Make sure that the address is pointing to the right type.
5951 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
5952 V = V.withElementType(llvmType);
5953
5954 // TODO: Generate TBAA information that describes this access as a structure
5955 // member access and not just an access to an object of the field's type. This
5956 // should be similar to what we do in EmitLValueForField().
5957 LValueBaseInfo BaseInfo = Base.getBaseInfo();
5958 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
5959 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
5960 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
5961 CGM.getTBAAInfoForSubobject(Base, FieldType));
5962}
5963
// Emits an lvalue for a compound literal: file-scope literals become
// globals; block-scope literals get a stack temporary initialized in place,
// with a C end-of-scope destructor cleanup when needed.
// NOTE(review): the listing extraction dropped the signature (original line
// 5964) and interior lines 5971, 5975, 5983-5984. Code left byte-identical;
// comments only.
5965 if (E->isFileScope()) {
5966 ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
5967 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
5968 }
5969 if (E->getType()->isVariablyModifiedType())
5970 // make sure to emit the VLA size.
5972
5973 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
5974 const Expr *InitExpr = E->getInitializer();
// NOTE(review): original line 5975 (presumably the 'Result' LValue
// construction returned below) was dropped by the extraction.
5976
5977 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
5978 /*Init*/ true);
5979
5980 // Block-scope compound literals are destroyed at the end of the enclosing
5981 // scope in C.
5982 if (!getLangOpts().CPlusPlus)
5985 E->getType(), getDestroyer(DtorKind),
5986 DtorKind & EHCleanup);
5987
5988 return Result;
5989}
5990
// Emits an lvalue for an InitListExpr: prvalue lists materialize an
// aggregate temporary; glvalue lists must be transparent (a single-element
// list initializing a reference), so emit that sole element.
// NOTE(review): the function signature (original line 5991) was dropped by
// the listing extraction.
5992 if (!E->isGLValue())
5993 // Initializing an aggregate temporary in C++11: T{...}.
5994 return EmitAggExprToLValue(E);
5995
5996 // An lvalue initializer list must be initializing a reference.
5997 assert(E->isTransparent() && "non-transparent glvalue init list");
5998 return EmitLValue(E->getInit(0));
5999}
6000
6001/// Emit the operand of a glvalue conditional operator. This is either a glvalue
6002/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
6003/// LValue is returned and the current block has been terminated.
6004static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
6005 const Expr *Operand) {
6006 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
6007 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
6008 return std::nullopt;
6009 }
6010
6011 return CGF.EmitLValue(Operand);
6012}
6013
6014namespace {
6015// Handle the case where the condition is a constant evaluatable simple integer,
6016// which means we don't have to separately handle the true/false blocks.
6017std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
6018 CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
6019 const Expr *condExpr = E->getCond();
6020 bool CondExprBool;
6021 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
6022 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
6023 if (!CondExprBool)
6024 std::swap(Live, Dead);
6025
6026 if (!CGF.ContainsLabel(Dead)) {
6027 // If the true case is live, we need to track its region.
6028 CGF.incrementProfileCounter(CondExprBool ? CGF.UseExecPath
6029 : CGF.UseSkipPath,
6030 E, /*UseBoth=*/true);
6031 CGF.markStmtMaybeUsed(Dead);
6032 // If a throw expression we emit it and return an undefined lvalue
6033 // because it can't be used.
6034 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
6035 CGF.EmitCXXThrowExpr(ThrowExpr);
6036 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
6037 llvm::Type *Ty = CGF.DefaultPtrTy;
6038 return CGF.MakeAddrLValue(
6039 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
6040 Dead->getType());
6041 }
6042 return CGF.EmitLValue(Live);
6043 }
6044 }
6045 return std::nullopt;
6046}
// Result bundle for EmitConditionalBlocks: the basic block each arm ended
// in, plus the value each arm's generator produced (std::nullopt when the
// generator produced no lvalue, e.g. a throw-expression arm).
6047struct ConditionalInfo {
6048 llvm::BasicBlock *lhsBlock, *rhsBlock;
6049 std::optional<LValue> LHS, RHS;
6050};
6051
6052// Create and generate the 3 blocks for a conditional operator.
6053// Leaves the 'current block' in the continuation basic block.
// BranchGenFunc is invoked once per arm as BranchGenFunc(CGF, armExpr) and
// its result is stored into Info.LHS / Info.RHS.
// NOTE(review): the listing extraction dropped interior lines 6063, 6069 and
// 6080 (6069/6080 presumably declare the 'eval' ConditionalEvaluation used
// below). Code left byte-identical; comments only.
6054template<typename FuncTy>
6055ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
6056 const AbstractConditionalOperator *E,
6057 const FuncTy &BranchGenFunc) {
6058 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
6059 CGF.createBasicBlock("cond.false"), std::nullopt,
6060 std::nullopt};
6061 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
6062
6064 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
6065 CGF.getProfileCount(E));
6066
6067 // Any temporaries created here are conditional.
6068 CGF.EmitBlock(Info.lhsBlock);
6070 eval.begin(CGF);
6071 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
6072 eval.end(CGF);
// Re-read the insert block: the generator may have emitted new blocks.
6073 Info.lhsBlock = CGF.Builder.GetInsertBlock();
6074
// No branch to the continuation when the arm produced no value (its block
// was terminated, e.g. by a throw).
6075 if (Info.LHS)
6076 CGF.Builder.CreateBr(endBlock);
6077
6078 // Any temporaries created here are conditional.
6079 CGF.EmitBlock(Info.rhsBlock);
6081 eval.begin(CGF);
6082 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
6083 eval.end(CGF);
6084 Info.rhsBlock = CGF.Builder.GetInsertBlock();
6085 CGF.EmitBlock(endBlock);
6086
6087 return Info;
6088}
6089} // namespace
6090
// Emits a conditional operator whose result is ignored: still evaluates the
// condition and the selected arm for side effects, discarding any value.
// NOTE(review): the signature opener (original line 6091) was dropped by the
// listing extraction; only the parameter line survives below.
6092 const AbstractConditionalOperator *E) {
6093 if (!E->isGLValue()) {
6094 // ?: here should be an aggregate.
6095 assert(hasAggregateEvaluationKind(E->getType()) &&
6096 "Unexpected conditional operator!");
6097 return (void)EmitAggExprToLValue(E);
6098 }
6099
6100 OpaqueValueMapping binding(*this, E);
// Constant-foldable condition: the simple-case helper already emitted the
// live arm; nothing further to do.
6101 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
6102 return;
6103
// Otherwise emit both arms for side effects only.
6104 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
6105 CGF.EmitIgnoredExpr(E);
6106 return LValue{};
6107 });
6108}
// Emits an lvalue for a glvalue conditional operator, merging the two arms'
// addresses with a phi and merging their alignment/TBAA info. Prvalue
// conditionals are handled as aggregates.
// NOTE(review): the listing extraction dropped the signature (original lines
// 6109-6110) and line 6135 (the 'Address result = ...' opener whose argument
// list continues below). Code left byte-identical; comments only.
6111 if (!expr->isGLValue()) {
6112 // ?: here should be an aggregate.
6113 assert(hasAggregateEvaluationKind(expr->getType()) &&
6114 "Unexpected conditional operator!");
6115 return EmitAggExprToLValue(expr);
6116 }
6117
6118 OpaqueValueMapping binding(*this, expr);
// Fast path: condition folds to a constant, so only one arm was emitted.
6119 if (std::optional<LValue> Res =
6120 HandleConditionalOperatorLValueSimpleCase(*this, expr))
6121 return *Res;
6122
6123 ConditionalInfo Info = EmitConditionalBlocks(
6124 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
6125 return EmitLValueOrThrowExpression(CGF, E);
6126 });
6127
6128 if ((Info.LHS && !Info.LHS->isSimple()) ||
6129 (Info.RHS && !Info.RHS->isSimple()))
6130 return EmitUnsupportedLValue(expr, "conditional operator");
6131
6132 if (Info.LHS && Info.RHS) {
6133 Address lhsAddr = Info.LHS->getAddress();
6134 Address rhsAddr = Info.RHS->getAddress();
6136 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
6137 Builder.GetInsertBlock(), expr->getType());
// Merge per-arm metadata: take the weaker alignment source and the merged
// TBAA access info.
6138 AlignmentSource alignSource =
6139 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
6140 Info.RHS->getBaseInfo().getAlignmentSource());
6141 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
6142 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
6143 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
6144 TBAAInfo);
6145 } else {
// One arm was a throw: the surviving arm's lvalue is the result.
6146 assert((Info.LHS || Info.RHS) &&
6147 "both operands of glvalue conditional are throw-expressions?");
6148 return Info.LHS ? *Info.LHS : *Info.RHS;
6149 }
6150}
6151
6152/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
6153/// type. If the cast is to a reference, we can have the usual lvalue result,
6154/// otherwise if a cast is needed by the code generator in an lvalue context,
6155/// then it must mean that we need the address of an aggregate in order to
6156/// access one of its members. This can happen for all the reasons that casts
6157/// are permitted with aggregate result, including noop aggregate casts, and
6158/// cast from scalar to union.
6160 llvm::scope_exit RestoreCurCast([this, Prev = CurCast] { CurCast = Prev; });
6161 CurCast = E;
6162 switch (E->getCastKind()) {
6163 case CK_ToVoid:
6164 case CK_BitCast:
6165 case CK_LValueToRValueBitCast:
6166 case CK_ArrayToPointerDecay:
6167 case CK_FunctionToPointerDecay:
6168 case CK_NullToMemberPointer:
6169 case CK_NullToPointer:
6170 case CK_IntegralToPointer:
6171 case CK_PointerToIntegral:
6172 case CK_PointerToBoolean:
6173 case CK_IntegralCast:
6174 case CK_BooleanToSignedIntegral:
6175 case CK_IntegralToBoolean:
6176 case CK_IntegralToFloating:
6177 case CK_FloatingToIntegral:
6178 case CK_FloatingToBoolean:
6179 case CK_FloatingCast:
6180 case CK_FloatingRealToComplex:
6181 case CK_FloatingComplexToReal:
6182 case CK_FloatingComplexToBoolean:
6183 case CK_FloatingComplexCast:
6184 case CK_FloatingComplexToIntegralComplex:
6185 case CK_IntegralRealToComplex:
6186 case CK_IntegralComplexToReal:
6187 case CK_IntegralComplexToBoolean:
6188 case CK_IntegralComplexCast:
6189 case CK_IntegralComplexToFloatingComplex:
6190 case CK_DerivedToBaseMemberPointer:
6191 case CK_BaseToDerivedMemberPointer:
6192 case CK_MemberPointerToBoolean:
6193 case CK_ReinterpretMemberPointer:
6194 case CK_AnyPointerToBlockPointerCast:
6195 case CK_ARCProduceObject:
6196 case CK_ARCConsumeObject:
6197 case CK_ARCReclaimReturnedObject:
6198 case CK_ARCExtendBlockObject:
6199 case CK_CopyAndAutoreleaseBlockObject:
6200 case CK_IntToOCLSampler:
6201 case CK_FloatingToFixedPoint:
6202 case CK_FixedPointToFloating:
6203 case CK_FixedPointCast:
6204 case CK_FixedPointToBoolean:
6205 case CK_FixedPointToIntegral:
6206 case CK_IntegralToFixedPoint:
6207 case CK_MatrixCast:
6208 case CK_HLSLVectorTruncation:
6209 case CK_HLSLMatrixTruncation:
6210 case CK_HLSLArrayRValue:
6211 case CK_HLSLElementwiseCast:
6212 case CK_HLSLAggregateSplatCast:
6213 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
6214
6215 case CK_Dependent:
6216 llvm_unreachable("dependent cast kind in IR gen!");
6217
6218 case CK_BuiltinFnToFnPtr:
6219 llvm_unreachable("builtin functions are handled elsewhere");
6220
6221 // These are never l-values; just use the aggregate emission code.
6222 case CK_NonAtomicToAtomic:
6223 case CK_AtomicToNonAtomic:
6224 return EmitAggExprToLValue(E);
6225
6226 case CK_Dynamic: {
6227 LValue LV = EmitLValue(E->getSubExpr());
6228 Address V = LV.getAddress();
6229 const auto *DCE = cast<CXXDynamicCastExpr>(E);
6231 }
6232
6233 case CK_ConstructorConversion:
6234 case CK_UserDefinedConversion:
6235 case CK_CPointerToObjCPointerCast:
6236 case CK_BlockPointerToObjCPointerCast:
6237 case CK_LValueToRValue:
6238 return EmitLValue(E->getSubExpr());
6239
6240 case CK_NoOp: {
6241 // CK_NoOp can model a qualification conversion, which can remove an array
6242 // bound and change the IR type.
6243 // FIXME: Once pointee types are removed from IR, remove this.
6244 LValue LV = EmitLValue(E->getSubExpr());
6245 // Propagate the volatile qualifer to LValue, if exist in E.
6247 LV.getQuals() = E->getType().getQualifiers();
6248 if (LV.isSimple()) {
6249 Address V = LV.getAddress();
6250 if (V.isValid()) {
6251 llvm::Type *T = ConvertTypeForMem(E->getType());
6252 if (V.getElementType() != T)
6253 LV.setAddress(V.withElementType(T));
6254 }
6255 }
6256 return LV;
6257 }
6258
6259 case CK_UncheckedDerivedToBase:
6260 case CK_DerivedToBase: {
6261 auto *DerivedClassDecl = E->getSubExpr()->getType()->castAsCXXRecordDecl();
6262 LValue LV = EmitLValue(E->getSubExpr());
6263 Address This = LV.getAddress();
6264
6265 // Perform the derived-to-base conversion
6267 This, DerivedClassDecl, E->path_begin(), E->path_end(),
6268 /*NullCheckValue=*/false, E->getExprLoc());
6269
6270 // TODO: Support accesses to members of base classes in TBAA. For now, we
6271 // conservatively pretend that the complete object is of the base class
6272 // type.
6273 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
6274 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6275 }
6276 case CK_ToUnion:
6277 return EmitAggExprToLValue(E);
6278 case CK_BaseToDerived: {
6279 auto *DerivedClassDecl = E->getType()->castAsCXXRecordDecl();
6280 LValue LV = EmitLValue(E->getSubExpr());
6281
6282 // Perform the base-to-derived conversion
6284 LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
6285 /*NullCheckValue=*/false);
6286
6287 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
6288 // performed and the object is not of the derived type.
6291 E->getType());
6292
6293 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
6294 EmitVTablePtrCheckForCast(E->getType(), Derived,
6295 /*MayBeNull=*/false, CFITCK_DerivedCast,
6296 E->getBeginLoc());
6297
6298 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
6299 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6300 }
6301 case CK_LValueBitCast: {
6302 // This must be a reinterpret_cast (or c-style equivalent).
6303 const auto *CE = cast<ExplicitCastExpr>(E);
6304
6305 CGM.EmitExplicitCastExprType(CE, this);
6306 LValue LV = EmitLValue(E->getSubExpr());
6308 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
6309
6310 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
6312 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
6313 E->getBeginLoc());
6314
6315 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
6316 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6317 }
6318 case CK_AddressSpaceConversion: {
6319 LValue LV = EmitLValue(E->getSubExpr());
6320 QualType DestTy = getContext().getPointerType(E->getType());
6321 llvm::Value *V =
6322 performAddrSpaceCast(LV.getPointer(*this), ConvertType(DestTy));
6324 LV.getAddress().getAlignment()),
6325 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
6326 }
6327 case CK_ObjCObjectLValueCast: {
6328 LValue LV = EmitLValue(E->getSubExpr());
6330 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
6331 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6332 }
6333 case CK_ZeroToOCLOpaqueType:
6334 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
6335
6336 case CK_VectorSplat: {
6337 // LValue results of vector splats are only supported in HLSL.
6338 if (!getLangOpts().HLSL)
6339 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
6340 return EmitLValue(E->getSubExpr());
6341 }
6342 }
6343
6344 llvm_unreachable("Unhandled lvalue cast kind?");
6345}
6346
6351
// [review] HLSL out/inout argument helper: emits the argument's l-value and
// creates a matching IR temporary; returns {argument l-value, temporary
// l-value}. For 'inout' the current value is first copied into the temporary
// (the elided line 6363 carries the first half of that call; 6364 below is
// its continuation).
// NOTE(review): doxygen capture — the signature line and some body lines are
// elided; the leading numbers are the original file's line numbers.
6352std::pair<LValue, LValue>
6354 // Emitting the casted temporary through an opaque value.
6355 LValue BaseLV = EmitLValue(E->getArgLValue());
6357
6358 QualType ExprTy = E->getType();
6359 Address OutTemp = CreateIRTempWithoutCast(ExprTy);
6360 LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);
6361
6362 if (E->isInOut())
6364 TempLV);
6365
6367 return std::make_pair(BaseLV, TempLV);
6368}
6369
// [review] Emits an HLSL out-argument: builds the {base, temp} l-value pair,
// registers a writeback from the temporary to the base l-value (applying the
// writeback cast), and adds the temporary's address as the call argument.
// NOTE(review): doxygen capture — the signature line (6370) is elided; the
// leading numbers are the original file's line numbers.
6371 CallArgList &Args, QualType Ty) {
6372
6373 auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
6374
6375 llvm::Value *Addr = TempLV.getAddress().getBasePointer();
6376 llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
6377
6379
// The temporary's address is both the writeback source and the actual
// argument value passed to the callee.
6380 Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
6381 Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast());
6382 Args.add(RValue::get(TmpAddr, *this), Ty);
6383 return TempLV;
6384}
6385
// [review] Returns the l-value previously bound for an opaque value
// expression (OVE); if none was bound, the OVE must be "unique" and its
// source expression is emitted directly (see the assert below).
// NOTE(review): doxygen capture — the signature lines are elided; the
// leading numbers are the original file's line numbers.
6386LValue
6389
6390 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
6391 it = OpaqueLValues.find(e);
6392
6393 if (it != OpaqueLValues.end())
6394 return it->second;
6395
6396 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
6397 return EmitLValue(e->getSourceExpr());
6398}
6399
// [review] R-value counterpart of the function above: looks up a previously
// bound r-value for the OVE, falling back to emitting the source expression
// for unique OVEs.
// NOTE(review): doxygen capture — the signature lines are elided; the
// leading numbers are the original file's line numbers.
6400RValue
6403
6404 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
6405 it = OpaqueRValues.find(e);
6406
6407 if (it != OpaqueRValues.end())
6408 return it->second;
6409
6410 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
6411 return EmitAnyExpr(e->getSourceExpr());
6412}
6413
// [review] Membership query over the two OVE maps. The guard that selects
// between the two returns (presumably testing whether the OVE is emitted as
// an l-value) sits on an elided line — TODO confirm against upstream.
6416 return OpaqueLValues.contains(E);
6417 return OpaqueRValues.contains(E);
6418}
6419
// [review] Loads one field of an aggregate as an RValue, dispatching on the
// field type's evaluation kind (complex / aggregate / scalar). Used by
// field-by-field copies, hence the special handling of references and
// bitfields below.
// NOTE(review): doxygen capture — the signature line (6420) is elided; the
// leading numbers are the original file's line numbers.
6421 const FieldDecl *FD,
6422 SourceLocation Loc) {
6423 QualType FT = FD->getType();
6424 LValue FieldLV = EmitLValueForField(LV, FD);
6425 switch (getEvaluationKind(FT)) {
6426 case TEK_Complex:
6427 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
6428 case TEK_Aggregate:
6429 return FieldLV.asAggregateRValue();
6430 case TEK_Scalar:
6431 // This routine is used to load fields one-by-one to perform a copy, so
6432 // don't load reference fields.
6433 if (FD->getType()->isReferenceType())
6434 return RValue::get(FieldLV.getPointer(*this));
6435 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
6436 // primitive load.
6437 if (FieldLV.isBitField())
6438 return EmitLoadOfLValue(FieldLV, Loc);
6439 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
6440 }
6441 llvm_unreachable("bad evaluation kind");
6442}
6443
6444//===--------------------------------------------------------------------===//
6445// Expression Emission
6446//===--------------------------------------------------------------------===//
6447
// [review] Top-level CallExpr emission: classifies the call (block call, C++
// member call, CUDA kernel call, explicit-object operator call, builtin,
// pseudo-destructor) and dispatches to the specialized emitter, falling back
// to the generic EmitCall for ordinary calls. The scope_exit tags the
// resulting call/invoke with CoroElideSafe when requested by the expression.
// NOTE(review): doxygen capture — the signature lines (6448-6449) and line
// 6490 (the pseudo-destructor return) are elided; the leading numbers are the
// original file's line numbers.
6450 llvm::CallBase **CallOrInvoke) {
// Ensure CallOrInvoke always points at valid storage so the scope_exit
// below can inspect the emitted instruction.
6451 llvm::CallBase *CallOrInvokeStorage;
6452 if (!CallOrInvoke) {
6453 CallOrInvoke = &CallOrInvokeStorage;
6454 }
6455
6456 llvm::scope_exit AddCoroElideSafeOnExit([&] {
6457 if (E->isCoroElideSafe()) {
6458 auto *I = *CallOrInvoke;
6459 if (I)
6460 I->addFnAttr(llvm::Attribute::CoroElideSafe);
6461 }
6462 });
6463
6464 // Builtins never have block type.
6465 if (E->getCallee()->getType()->isBlockPointerType())
6466 return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
6467
6468 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
6469 return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);
6470
6471 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
6472 return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);
6473
6474 // A CXXOperatorCallExpr is created even for explicit object methods, but
6475 // these should be treated like static function call.
6476 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
6477 if (const auto *MD =
6478 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
6479 MD && MD->isImplicitObjectMemberFunction())
6480 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);
6481
6482 CGCallee callee = EmitCallee(E->getCallee());
6483
6484 if (callee.isBuiltin()) {
6485 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
6486 E, ReturnValue);
6487 }
6488
6489 if (callee.isPseudoDestructor()) {
6491 }
6492
6493 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
6494 /*Chain=*/nullptr, CallOrInvoke);
6495}
6496
/// Emit a CallExpr without considering whether it might be a subclass.
// [review] Bypasses the CXXMemberCallExpr/CUDAKernelCallExpr/etc. dispatch
// above and emits the call directly through the generic path.
// NOTE(review): doxygen capture — signature lines (6498-6499) elided; the
// leading numbers are the original file's line numbers.
6500 llvm::CallBase **CallOrInvoke) {
6501 CGCallee Callee = EmitCallee(E->getCallee());
6502 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
6503 /*Chain=*/nullptr, CallOrInvoke);
6504}
6505
// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
// [review] Returns true only when every redeclaration in the chain is an
// inline-builtin declaration.
// NOTE(review): doxygen capture — the signature line (6509) is elided.
6510 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
6511 if (!PD->isInlineBuiltinDeclaration())
6512 return false;
6513 return true;
6514}
6515
// [review] Builds the CGCallee for a direct call to GD. Builtins get special
// treatment: calls to inline builtins are redirected to an internal
// "<mangled>.inline" always_inline clone, while other builtins are emitted as
// CGCallee::forBuiltin unless no-builtin attributes forbid it. CUDA host
// calls to __global__ kernels are routed through the kernel stub.
// NOTE(review): doxygen capture — the signature line (6516) and the lines
// completing the conditions at 6527 and 6535 are elided; the leading numbers
// are the original file's line numbers.
6517 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
6518
6519 if (auto builtinID = FD->getBuiltinID()) {
6520 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
6521 std::string NoBuiltins = "no-builtins";
6522
6523 StringRef Ident = CGF.CGM.getMangledName(GD);
6524 std::string FDInlineName = (Ident + ".inline").str();
6525
6526 bool IsPredefinedLibFunction =
6528 bool HasAttributeNoBuiltin =
6529 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
6530 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
6531
6532 // When directing calling an inline builtin, call it through it's mangled
6533 // name to make it clear it's not the actual builtin.
6534 if (CGF.CurFn->getName() != FDInlineName &&
// Lazily create the ".inline" clone the first time it is referenced.
6536 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
6537 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
6538 llvm::Module *M = Fn->getParent();
6539 llvm::Function *Clone = M->getFunction(FDInlineName);
6540 if (!Clone) {
6541 Clone = llvm::Function::Create(Fn->getFunctionType(),
6542 llvm::GlobalValue::InternalLinkage,
6543 Fn->getAddressSpace(), FDInlineName, M);
6544 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
6545 }
6546 return CGCallee::forDirect(Clone, GD);
6547 }
6548
6549 // Replaceable builtins provide their own implementation of a builtin. If we
6550 // are in an inline builtin implementation, avoid trivial infinite
6551 // recursion. Honor __attribute__((no_builtin("foo"))) or
6552 // __attribute__((no_builtin)) on the current function unless foo is
6553 // not a predefined library function which means we must generate the
6554 // builtin no matter what.
6555 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
6556 return CGCallee::forBuiltin(builtinID, FD);
6557 }
6558
6559 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
// Host-side CUDA: a __global__ function is called through its kernel stub.
6560 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
6561 FD->hasAttr<CUDAGlobalAttr>())
6562 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
6563 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
6564
6565 return CGCallee::forDirect(CalleePtr, GD);
6567
// [review] Maps a FunctionDecl to the GlobalDecl used for a direct call; an
// OpenCL-spelled device kernel takes the (elided, line 6570) special return,
// everything else gets a plain GlobalDecl. TODO confirm the elided branch
// against upstream.
6569 if (DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
6571 return GlobalDecl(FD);
6572}
6573
// [review] Resolves a callee expression to a CGCallee: peels parens and
// function-to-pointer decay, preserves __ptrauth qualifiers on loaded
// function pointers, resolves direct DeclRef/Member callees, looks through
// NTTP substitutions, handles pseudo-destructors, and otherwise emits an
// indirect callee with pointer-auth info.
// NOTE(review): doxygen capture — the signature line (6574) and several
// interior lines (e.g. 6590, 6592, 6600, 6609, 6623, 6628) are elided; the
// leading numbers are the original file's line numbers.
6575 E = E->IgnoreParens();
6576
6577 // Look through function-to-pointer decay.
6578 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
6579 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
6580 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
6581 return EmitCallee(ICE->getSubExpr());
6582 }
6583
6584 // Try to remember the original __ptrauth qualifier for loads of
6585 // function pointers.
6586 if (ICE->getCastKind() == CK_LValueToRValue) {
6587 const Expr *SubExpr = ICE->getSubExpr();
6588 if (const auto *PtrType = SubExpr->getType()->getAs<PointerType>()) {
6589 std::pair<llvm::Value *, CGPointerAuthInfo> Result =
6591
6593 assert(FunctionType->isFunctionType());
6594
6595 GlobalDecl GD;
6596 if (const auto *VD =
6597 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee())) {
6598 GD = GlobalDecl(VD);
6599 }
6601 CGCallee Callee(CalleeInfo, Result.first, Result.second);
6602 return Callee;
6603 }
6604 }
6605
6606 // Resolve direct calls.
6607 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
6608 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
6610 }
6611 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
6612 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
// The base is emitted purely for its side effects; the call is direct.
6613 EmitIgnoredExpr(ME->getBase());
6614 return EmitDirectCallee(*this, FD);
6615 }
6616
6617 // Look through template substitutions.
6618 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
6619 return EmitCallee(NTTP->getReplacement());
6620
6621 // Treat pseudo-destructor calls differently.
6622 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
6624 }
6625
6626 // Otherwise, we have an indirect reference.
6627 llvm::Value *calleePtr;
6629 if (auto ptrType = E->getType()->getAs<PointerType>()) {
6630 calleePtr = EmitScalarExpr(E);
6631 functionType = ptrType->getPointeeType();
6632 } else {
6633 functionType = E->getType();
6634 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
6635 }
6636 assert(functionType->isFunctionType());
6637
6638 GlobalDecl GD;
6639 if (const auto *VD =
6640 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
6641 GD = GlobalDecl(VD);
6642
6643 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
6644 CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
6645 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
6646 return callee;
6647}
6648
// [review] Emits a binary operator used as an l-value: comma, pointer-to-
// member, or (the main case) assignment, dispatching on the evaluation kind
// of the result. Scalar assignment handles pointer-auth qualifiers, ObjC
// lifetimes, and bitfield-conversion sanitizing; aggregate assignment may
// divert to the HLSL constant-array path.
// NOTE(review): doxygen capture — the signature line (6649) and several
// interior lines (e.g. the LV declarations at 6679/6718, ObjC lifetime case
// labels, and the bitfield store calls at 6728/6730) are elided; the leading
// numbers are the original file's line numbers.
6650 // Comma expressions just emit their LHS then their RHS as an l-value.
6651 if (E->getOpcode() == BO_Comma) {
6652 EmitIgnoredExpr(E->getLHS());
6654 return EmitLValue(E->getRHS());
6655 }
6656
6657 if (E->getOpcode() == BO_PtrMemD ||
6658 E->getOpcode() == BO_PtrMemI)
6660
6661 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
6662
6663 // Create a Key Instructions source location atom group that covers both
6664 // LHS and RHS expressions. Nested RHS expressions may get subsequently
6665 // separately grouped (1 below):
6666 //
6667 // 1. `a = b = c` -> Two atoms.
6668 // 2. `x = new(1)` -> One atom (for both addr store and value store).
6669 // 3. Complex and agg assignment -> One atom.
6671
6672 // Note that in all of these cases, __block variables need the RHS
6673 // evaluated first just in case the variable gets moved by the RHS.
6674
6675 switch (getEvaluationKind(E->getType())) {
6676 case TEK_Scalar: {
// Pointer-auth-qualified stores re-sign the RHS for the destination's
// qualifier; the qualifier is stripped from the copied l-value first.
6677 if (PointerAuthQualifier PtrAuth =
6678 E->getLHS()->getType().getPointerAuth()) {
6680 LValue CopiedLV = LV;
6681 CopiedLV.getQuals().removePointerAuth();
6682 llvm::Value *RV =
6683 EmitPointerAuthQualify(PtrAuth, E->getRHS(), CopiedLV.getAddress());
6684 EmitNullabilityCheck(CopiedLV, RV, E->getExprLoc());
6685 EmitStoreThroughLValue(RValue::get(RV), CopiedLV);
6686 return LV;
6687 }
6688
6689 switch (E->getLHS()->getType().getObjCLifetime()) {
6691 return EmitARCStoreStrong(E, /*ignored*/ false).first;
6692
6694 return EmitARCStoreAutoreleasing(E).first;
6695
6696 // No reason to do any of these differently.
6700 break;
6701 }
6702
6703 // TODO: Can we de-duplicate this code with the corresponding code in
6704 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
6705 RValue RV;
6706 llvm::Value *Previous = nullptr;
6707 QualType SrcType = E->getRHS()->getType();
6708 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
6709 // we want to extract that value and potentially (if the bitfield sanitizer
6710 // is enabled) use it to check for an implicit conversion.
6711 if (E->getLHS()->refersToBitField()) {
6712 llvm::Value *RHS =
6714 RV = RValue::get(RHS);
6715 } else
6716 RV = EmitAnyExpr(E->getRHS());
6717
6719
6720 if (RV.isScalar())
6722
6723 if (LV.isBitField()) {
6724 llvm::Value *Result = nullptr;
6725 // If bitfield sanitizers are enabled we want to use the result
6726 // to check whether a truncation or sign change has occurred.
6727 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
6729 else
6731
6732 // If the expression contained an implicit conversion, make sure
6733 // to use the value before the scalar conversion.
6734 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
6735 QualType DstType = E->getLHS()->getType();
6736 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
6737 LV.getBitFieldInfo(), E->getExprLoc());
6738 } else
6739 EmitStoreThroughLValue(RV, LV);
6740
6741 if (getLangOpts().OpenMP)
6742 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
6743 E->getLHS());
6744 return LV;
6745 }
6746
6747 case TEK_Complex:
6749
6750 case TEK_Aggregate:
6751 // If the lang opt is HLSL and the LHS is a constant array
6752 // then we are performing a copy assignment and call a special
6753 // function because EmitAggExprToLValue emits to a temporary LValue
6755 return EmitHLSLArrayAssignLValue(E);
6756
6757 return EmitAggExprToLValue(E);
6758 }
6759 llvm_unreachable("bad evaluation kind");
6760}
6761
// This function implements trivial copy assignment for HLSL's
// assignable constant arrays.
// [review] Emits the LHS l-value, lets the HLSL runtime handle resource-array
// copies, and otherwise initializes the LHS from the RHS r-value (the
// EmitInitializationToLValue call sits on the elided line 6777).
// NOTE(review): doxygen capture — the signature line (6764) and lines
// 6770/6777 are elided; leading numbers are original file line numbers.
6765 // Don't emit an LValue for the RHS because it might not be an LValue
6766 LValue LHS = EmitLValue(E->getLHS());
6767
6768 // If the RHS is a global resource array, copy all individual resources
6769 // into LHS.
6771 if (CGM.getHLSLRuntime().emitResourceArrayCopy(LHS, E->getRHS(), *this))
6772 return LHS;
6773
6774 // In C the RHS of an assignment operator is an RValue.
6775 // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
6776 // EmitInitializationToLValue to emit an RValue into an LValue.
6778 return LHS;
6779}
6780
// [review] Emits a call used in l-value position: aggregate results become an
// addressable l-value directly; a scalar result is only valid when the call
// returns a reference (asserted below), with the reference l-value built on
// the elided line 6793.
// NOTE(review): doxygen capture — signature line (6781) and lines 6787/6793
// are elided; leading numbers are original file line numbers.
6782 llvm::CallBase **CallOrInvoke) {
6783 RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);
6784
6785 if (!RV.isScalar())
6786 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6788
6789 assert(E->getCallReturnType(getContext())->isReferenceType() &&
6790 "Can't have a scalar return unless the return type is a "
6791 "reference type!");
6792
6794}
6795
// [review] L-value emission that round-trips through an aggregate temporary
// (hence the FIXME). The elided signature (line 6796) presumably names this
// as the va_arg l-value emitter — TODO confirm against upstream.
6797 // FIXME: This shouldn't require another copy.
6798 return EmitAggExprToLValue(E);
6799}
6800
// [review] Constructs a C++ object into a fresh aggregate temporary and
// (on the elided line 6806) returns it as an l-value; the elided assert
// start (6802) pairs with the message continuation below.
6803 && "binding l-value to type which needs a temporary");
6804 AggValueSlot Slot = CreateAggTemp(E->getType());
6805 EmitCXXConstructExpr(E, Slot);
6807}
6808
6809LValue
6813
6815 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
6816 .withElementType(ConvertType(E->getType()));
6817}
6818
6823
6824LValue
6832
6835
6836 if (!RV.isScalar())
6837 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6839
6840 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
6841 "Can't have a scalar return unless the return type is a "
6842 "reference type!");
6843
6845}
6846
// [review] Fetches the address of an ObjC selector from the ObjC runtime;
// the l-value return sits on the elided line 6850.
6848 Address V =
6849 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
6851}
6852
// [review] Delegates ivar-offset computation to the ObjC runtime.
// NOTE(review): doxygen capture — the signature's first line is elided.
6854 const ObjCIvarDecl *Ivar) {
6855 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
6856}
6857
// [review] Same as EmitIvarOffset but coerced (zext/trunc) to the target's
// pointer-difference type.
// NOTE(review): doxygen capture — the signature's name line (6859) is elided.
6858llvm::Value *
6860 const ObjCIvarDecl *Ivar) {
6861 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
6862 QualType PointerDiffType = getContext().getPointerDiffType();
6863 return Builder.CreateZExtOrTrunc(OffsetValue,
6864 getTypes().ConvertType(PointerDiffType));
6865}
6866
// [review] Delegates ivar l-value formation to the ObjC runtime, forwarding
// the base object pointer and CVR qualifiers.
// NOTE(review): doxygen capture — the signature's first line (6867) is elided.
6868 llvm::Value *BaseValue,
6869 const ObjCIvarDecl *Ivar,
6870 unsigned CVRQualifiers) {
6871 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
6872 Ivar, CVRQualifiers);
6873}
6874
// [review] Emits an ObjC ivar reference l-value: computes the base object
// pointer (through '->' as a scalar, otherwise via the base l-value), then
// forms the ivar l-value with the base's CVR qualifiers.
// NOTE(review): doxygen capture — signature line (6875) and line 6895 are
// elided; leading numbers are original file line numbers.
6876 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
6877 llvm::Value *BaseValue = nullptr;
6878 const Expr *BaseExpr = E->getBase();
6879 Qualifiers BaseQuals;
6880 QualType ObjectTy;
6881 if (E->isArrow()) {
6882 BaseValue = EmitScalarExpr(BaseExpr);
6883 ObjectTy = BaseExpr->getType()->getPointeeType();
6884 BaseQuals = ObjectTy.getQualifiers();
6885 } else {
6886 LValue BaseLV = EmitLValue(BaseExpr);
6887 BaseValue = BaseLV.getPointer(*this);
6888 ObjectTy = BaseExpr->getType();
6889 BaseQuals = ObjectTy.getQualifiers();
6890 }
6891
6892 LValue LV =
6893 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
6894 BaseQuals.getCVRQualifiers());
6896 return LV;
6897}
6898
// [review] Emits the expression to an aggregate temporary and returns its
// address as an l-value (the MakeAddrLValue continuation sits on the elided
// line 6903). Per the comment, only aggregate-returning message expressions
// reach this l-value path.
6900 // Can only get l-value for message expression returning aggregate type
6901 RValue RV = EmitAnyExprToTemp(E);
6902 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6904}
6905
// [review] The generic call emitter: verifies the callee is a function
// pointer, optionally emits the -fsanitize=function prefix-signature/type-
// hash check and the CFI indirect-call check, evaluates arguments (honoring
// C++17 evaluation order and static operator quirks), applies HIP kernel-
// handle and OpenMP target indirect-call rewrites, then performs the call
// and tags allocation-like callees for the alloc-token sanitizer.
// NOTE(review): doxygen capture — the signature's first lines and a number
// of interior lines (e.g. 6908, 6951, 7030-7031, 7052, 7056, 7065, 7098) are
// elided; the leading numbers are the original file's line numbers.
6907 const CGCallee &OrigCallee, const CallExpr *E,
6909 llvm::Value *Chain,
6910 llvm::CallBase **CallOrInvoke,
6911 CGFunctionInfo const **ResolvedFnInfo) {
6912 // Get the actual function type. The callee type will always be a pointer to
6913 // function type or a block pointer type.
6914 assert(CalleeType->isFunctionPointerType() &&
6915 "Call must have function pointer type!");
6916
6917 const Decl *TargetDecl =
6918 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
6919
6920 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
6921 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
6922 "trying to emit a call to an immediate function");
6923
6924 CalleeType = getContext().getCanonicalType(CalleeType);
6925
6926 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
6927
6928 CGCallee Callee = OrigCallee;
6929
6930 bool CFIUnchecked = CalleeType->hasPointeeToCFIUncheckedCalleeFunctionType();
6931
// -fsanitize=function: indirect calls through prototyped function pointers
// compare the callee's prefix signature and type hash before the call.
6932 if (SanOpts.has(SanitizerKind::Function) &&
6933 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
6934 !isa<FunctionNoProtoType>(PointeeType) && !CFIUnchecked) {
6935 if (llvm::Constant *PrefixSig =
6936 CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
6937 auto CheckOrdinal = SanitizerKind::SO_Function;
6938 auto CheckHandler = SanitizerHandler::FunctionTypeMismatch;
6939 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6940 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
6941
6942 llvm::Type *PrefixSigType = PrefixSig->getType();
6943 llvm::StructType *PrefixStructTy = llvm::StructType::get(
6944 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
6945
6946 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6947 if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
6948 // Use raw pointer since we are using the callee pointer as data here.
6949 Address Addr =
6950 Address(CalleePtr, CalleePtr->getType(),
6952 CalleePtr->getPointerAlignment(CGM.getDataLayout())),
6953 Callee.getPointerAuthInfo(), nullptr);
6954 CalleePtr = Addr.emitRawPointer(*this);
6955 }
6956
6957 // On 32-bit Arm, the low bit of a function pointer indicates whether
6958 // it's using the Arm or Thumb instruction set. The actual first
6959 // instruction lives at the same address either way, so we must clear
6960 // that low bit before using the function address to find the prefix
6961 // structure.
6962 //
6963 // This applies to both Arm and Thumb target triples, because
6964 // either one could be used in an interworking context where it
6965 // might be passed function pointers of both types.
6966 llvm::Value *AlignedCalleePtr;
6967 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
6968 AlignedCalleePtr = Builder.CreateIntrinsic(
6969 CalleePtr->getType(), llvm::Intrinsic::ptrmask,
6970 {CalleePtr, llvm::ConstantInt::getSigned(IntPtrTy, ~1)});
6971 } else {
6972 AlignedCalleePtr = CalleePtr;
6973 }
6974
// The prefix data lives at negative offsets from the function entry.
6975 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
6976 llvm::Value *CalleeSigPtr =
6977 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
6978 llvm::Value *CalleeSig =
6979 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
6980 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
6981
6982 llvm::BasicBlock *Cont = createBasicBlock("cont");
6983 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
6984 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
6985
6986 EmitBlock(TypeCheck);
6987 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
6988 Int32Ty,
6989 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
6990 getPointerAlign());
6991 llvm::Value *CalleeTypeHashMatch =
6992 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
6993 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
6994 EmitCheckTypeDescriptor(CalleeType)};
6995 EmitCheck(std::make_pair(CalleeTypeHashMatch, CheckOrdinal), CheckHandler,
6996 StaticData, {CalleePtr});
6997
6998 Builder.CreateBr(Cont);
6999 EmitBlock(Cont);
7000 }
7001 }
7002
7003 const auto *FnType = cast<FunctionType>(PointeeType);
7004
7005 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
7006 FD && DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
7007 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FnType);
7008
7009 // If we are checking indirect calls and this call is indirect, check that the
7010 // function pointer is a member of the bit set for the function type.
7011 if (SanOpts.has(SanitizerKind::CFIICall) &&
7012 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) && !CFIUnchecked) {
7013 auto CheckOrdinal = SanitizerKind::SO_CFIICall;
7014 auto CheckHandler = SanitizerHandler::CFICheckFail;
7015 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
7016 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
7017
7018 llvm::Metadata *MD =
7019 CGM.CreateMetadataIdentifierForFnType(QualType(FnType, 0));
7020
7021 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
7022
7023 llvm::Value *CalleePtr = Callee.getFunctionPointer();
7024 llvm::Value *TypeTest = Builder.CreateCall(
7025 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
7026
7027 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
7028 llvm::Constant *StaticData[] = {
7029 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
7032 };
7033 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
7034 EmitCfiSlowPathCheck(CheckOrdinal, TypeTest, CrossDsoTypeId, CalleePtr,
7035 StaticData);
7036 } else {
7037 EmitCheck(std::make_pair(TypeTest, CheckOrdinal), CheckHandler,
7038 StaticData, {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
7039 }
7040 }
7041
7042 CallArgList Args;
7043 if (Chain)
7044 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
7045
7046 // C++17 requires that we evaluate arguments to a call using assignment syntax
7047 // right-to-left, and that we evaluate arguments to certain other operators
7048 // left-to-right. Note that we allow this to override the order dictated by
7049 // the calling convention on the MS ABI, which means that parameter
7050 // destruction order is not necessarily reverse construction order.
7051 // FIXME: Revisit this based on C++ committee response to unimplementability.
7053 bool StaticOperator = false;
7054 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
7055 if (OCE->isAssignmentOp())
7057 else {
7058 switch (OCE->getOperator()) {
7059 case OO_LessLess:
7060 case OO_GreaterGreater:
7061 case OO_AmpAmp:
7062 case OO_PipePipe:
7063 case OO_Comma:
7064 case OO_ArrowStar:
7066 break;
7067 default:
7068 break;
7069 }
7070 }
7071
7072 if (const auto *MD =
7073 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
7074 MD && MD->isStatic())
7075 StaticOperator = true;
7076 }
7077
7078 auto Arguments = E->arguments();
7079 if (StaticOperator) {
7080 // If we're calling a static operator, we need to emit the object argument
7081 // and ignore it.
7082 EmitIgnoredExpr(E->getArg(0));
7083 Arguments = drop_begin(Arguments, 1);
7084 }
7085 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
7086 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
7087
7088 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
7089 Args, FnType, /*ChainCall=*/Chain);
7090
7091 if (ResolvedFnInfo)
7092 *ResolvedFnInfo = &FnInfo;
7093
7094 // HIP function pointer contains kernel handle when it is used in triple
7095 // chevron. The kernel stub needs to be loaded from kernel handle and used
7096 // as callee.
7097 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
7099 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
7100 llvm::Value *Handle = Callee.getFunctionPointer();
7101 auto *Stub = Builder.CreateLoad(
7102 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
7103 Callee.setFunctionPointer(Stub);
7104 }
7105
7106 // Insert function pointer lookup if this is a target call
7107 //
7108 // This is used for the indirect function case, virtual function case is
7109 // handled in ItaniumCXXABI.cpp
7110 if (getLangOpts().OpenMPIsTargetDevice && CGM.getTriple().isGPU() &&
7111 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
// Walk down member/subscript bases to find the underlying DeclRefExpr, if
// any, so indirectly-callable variables can be detected.
7112 const Expr *CalleeExpr = E->getCallee()->IgnoreParenImpCasts();
7113 const DeclRefExpr *DRE = nullptr;
7114 while (CalleeExpr) {
7115 if ((DRE = dyn_cast<DeclRefExpr>(CalleeExpr)))
7116 break;
7117 if (const auto *ME = dyn_cast<MemberExpr>(CalleeExpr))
7118 CalleeExpr = ME->getBase()->IgnoreParenImpCasts();
7119 else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(CalleeExpr))
7120 CalleeExpr = ASE->getBase()->IgnoreParenImpCasts();
7121 else
7122 break;
7123 }
7124
7125 const auto *VD = DRE ? dyn_cast<VarDecl>(DRE->getDecl()) : nullptr;
7126 if (VD && VD->hasAttr<OMPTargetIndirectCallAttr>()) {
7127 auto *FuncPtrTy = llvm::PointerType::get(
7128 CGM.getLLVMContext(), CGM.getDataLayout().getProgramAddressSpace());
7129 llvm::Type *RtlFnArgs[] = {FuncPtrTy};
7130 llvm::FunctionCallee DeviceRtlFn = CGM.CreateRuntimeFunction(
7131 llvm::FunctionType::get(FuncPtrTy, RtlFnArgs, false),
7132 "__llvm_omp_indirect_call_lookup");
7133 llvm::Value *Func = Callee.getFunctionPointer();
7134 llvm::Type *BackupTy = Func->getType();
7135 Func = Builder.CreatePointerBitCastOrAddrSpaceCast(Func, FuncPtrTy);
7136 Func = EmitRuntimeCall(DeviceRtlFn, {Func});
7137 Func = Builder.CreatePointerBitCastOrAddrSpaceCast(Func, BackupTy);
7138 Callee.setFunctionPointer(Func);
7139 }
7140 }
7141
7142 llvm::CallBase *LocalCallOrInvoke = nullptr;
7143 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
7144 E == MustTailCall, E->getExprLoc());
7145
7146 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
7147 if (CalleeDecl->hasAttr<RestrictAttr>() ||
7148 CalleeDecl->hasAttr<MallocSpanAttr>() ||
7149 CalleeDecl->hasAttr<AllocSizeAttr>()) {
7150 // Function has 'malloc' (aka. 'restrict') or 'alloc_size' attribute.
7151 if (SanOpts.has(SanitizerKind::AllocToken)) {
7152 // Set !alloc_token metadata.
7153 EmitAllocToken(LocalCallOrInvoke, E);
7154 }
7155 }
7156 }
7157 if (CallOrInvoke)
7158 *CallOrInvoke = LocalCallOrInvoke;
7159
7160 return Call;
7161}
7162
// [review] Emits an l-value for a .*/->* pointer-to-data-member expression:
// computes the base address (through the pointer for ->*), evaluates the
// member-pointer offset, and forms the member address (the CXXABI call that
// produces MemberAddr spans the elided lines 7178-7179).
// NOTE(review): doxygen capture — signature lines (7163-7164) are elided;
// leading numbers are original file line numbers.
7165 Address BaseAddr = Address::invalid();
7166 if (E->getOpcode() == BO_PtrMemI) {
7167 BaseAddr = EmitPointerWithAlignment(E->getLHS());
7168 } else {
7169 BaseAddr = EmitLValue(E->getLHS()).getAddress();
7170 }
7171
7172 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
7173 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
7174
7175 LValueBaseInfo BaseInfo;
7176 TBAAAccessInfo TBAAInfo;
7177 bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
7180 E, BaseAddr, OffsetV, MPT, IsInBounds, &BaseInfo, &TBAAInfo);
7181
7182 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
7183}
7184
/// Given the address of a temporary variable, produce an r-value of
/// its type.
// [review] Dispatches on the evaluation kind, loading a complex pair, an
// aggregate address, or a scalar as appropriate. The l-value formed from the
// incoming address sits on the elided line 7190.
// NOTE(review): doxygen capture — the signature's first line (7187) is
// elided; leading numbers are original file line numbers.
7188 QualType type,
7189 SourceLocation loc) {
7191 switch (getEvaluationKind(type)) {
7192 case TEK_Complex:
7193 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
7194 case TEK_Aggregate:
7195 return lvalue.asAggregateRValue();
7196 case TEK_Scalar:
7197 return RValue::get(EmitLoadOfScalar(lvalue, loc));
7198 }
7199 llvm_unreachable("bad evaluation kind");
7200}
7201
7202void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
7203 assert(Val->getType()->isFPOrFPVectorTy());
7204 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
7205 return;
7206
7207 llvm::MDBuilder MDHelper(getLLVMContext());
7208 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
7209
7210 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
7211}
7212
// [review] Applies the OpenCL/HIP relaxed sqrt accuracy (3 ulp for float,
// 1.5 ulp for half) via SetFPAccuracy unless correctly-rounded div/sqrt was
// requested. Only float/half element types are affected.
// NOTE(review): doxygen capture — the signature line (7213) is elided;
// leading numbers are original file line numbers.
7214 llvm::Type *EltTy = Val->getType()->getScalarType();
7215 if (!EltTy->isFloatTy() && !EltTy->isHalfTy())
7216 return;
7217
7218 if ((getLangOpts().OpenCL &&
7219 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
7220 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
7221 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
7222 // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3 ulp.
7223 // OpenCL v3.0 s7.4: minimum accuracy of half precision sqrt is 1.5 ulp.
7224 //
7225 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
7226 // build option allows an application to specify that single precision
7227 // floating-point divide (x/y and 1/x) and sqrt used in the program
7228 // source are correctly rounded.
7229 //
7230 // TODO: CUDA has a prec-sqrt flag
7231 SetFPAccuracy(Val, EltTy->isFloatTy() ? 3.0f : 1.5f);
7232 }
7233}
7234
// [review] Division counterpart of the sqrt accuracy setter above: 2.5 ulp
// for float, 1 ulp for half, under the same OpenCL/HIP conditions.
// NOTE(review): doxygen capture — the signature line (7235) is elided;
// leading numbers are original file line numbers.
7236 llvm::Type *EltTy = Val->getType()->getScalarType();
7237 if (!EltTy->isFloatTy() && !EltTy->isHalfTy())
7238 return;
7239
7240 if ((getLangOpts().OpenCL &&
7241 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
7242 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
7243 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
7244 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5 ulp.
7245 // OpenCL v3.0 s7.4: minimum accuracy of half precision / is 1 ulp.
7246 //
7247 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
7248 // build option allows an application to specify that single precision
7249 // floating-point divide (x/y and 1/x) and sqrt used in the program
7250 // source are correctly rounded.
7251 //
7252 // TODO: CUDA has a prec-div flag
7253 SetFPAccuracy(Val, EltTy->isFloatTy() ? 2.5f : 1.f);
7254 }
7255}
7256
7257namespace {
7258 struct LValueOrRValue {
7259 LValue LV;
7260 RValue RV;
7261 };
7262}
7263
7264static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
7265 const PseudoObjectExpr *E,
7266 bool forLValue,
7267 AggValueSlot slot) {
7269
7270 // Find the result expression, if any.
7271 const Expr *resultExpr = E->getResultExpr();
7272 LValueOrRValue result;
7273
7275 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
7276 const Expr *semantic = *i;
7277
7278 // If this semantic expression is an opaque value, bind it
7279 // to the result of its source expression.
7280 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
7281 // Skip unique OVEs.
7282 if (ov->isUnique()) {
7283 assert(ov != resultExpr &&
7284 "A unique OVE cannot be used as the result expression");
7285 continue;
7286 }
7287
7288 // If this is the result expression, we may need to evaluate
7289 // directly into the slot.
7291 OVMA opaqueData;
7292 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
7294 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
7295 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
7297 opaqueData = OVMA::bind(CGF, ov, LV);
7298 result.RV = slot.asRValue();
7299
7300 // Otherwise, emit as normal.
7301 } else {
7302 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
7303
7304 // If this is the result, also evaluate the result now.
7305 if (ov == resultExpr) {
7306 if (forLValue)
7307 result.LV = CGF.EmitLValue(ov);
7308 else
7309 result.RV = CGF.EmitAnyExpr(ov, slot);
7310 }
7311 }
7312
7313 opaques.push_back(opaqueData);
7314
7315 // Otherwise, if the expression is the result, evaluate it
7316 // and remember the result.
7317 } else if (semantic == resultExpr) {
7318 if (forLValue)
7319 result.LV = CGF.EmitLValue(semantic);
7320 else
7321 result.RV = CGF.EmitAnyExpr(semantic, slot);
7322
7323 // Otherwise, evaluate the expression in an ignored context.
7324 } else {
7325 CGF.EmitIgnoredExpr(semantic);
7326 }
7327 }
7328
7329 // Unbind all the opaques now.
7330 for (CodeGenFunction::OpaqueValueMappingData &opaque : opaques)
7331 opaque.unbind(CGF);
7332
7333 return result;
7334}
7335
7337 AggValueSlot slot) {
7338 return emitPseudoObjectExpr(*this, E, false, slot).RV;
7339}
7340
7344
// NOTE(review): body of the HLSL flat-cast flattening helper.  Its signature
// line (orig. 7345) and several declaration lines (orig. 7348, 7370, 7374,
// 7392, 7419, 7437, 7445, 7462) were lost in extraction — presumably the
// WorkList/ReverseList SmallVector declaration heads and the per-branch
// CharUnits 'Align' computations.  TODO: restore the dropped lines from
// upstream before this can compile.  The helper walks 'Val' depth-first and
// appends one LValue per leaf (scalar/vector-element/matrix-element/bitfield)
// to 'AccessList'.
7346 LValue Val, SmallVectorImpl<LValue> &AccessList) {
7347
// Worklist entries are (base lvalue, element type, GEP index path); it is
// seeded with the whole value plus the leading 0 index for the outer GEP.
 7349 std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
 7350 WorkList;
 7351 llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32);
 7352 WorkList.push_back({Val, Val.getType(), {llvm::ConstantInt::get(IdxTy, 0)}});
 7353
// Depth-first walk; children are pushed in reverse so they pop in
// declaration/index order.
 7354 while (!WorkList.empty()) {
 7355 auto [LVal, T, IdxList] = WorkList.pop_back_val();
 7356 T = T.getCanonicalType().getUnqualifiedType();
// Constant arrays: queue each element with its index appended to the path.
 7357 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) {
 7358 uint64_t Size = CAT->getZExtSize();
 7359 for (int64_t I = Size - 1; I > -1; I--) {
 7360 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
 7361 IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
 7362 WorkList.emplace_back(LVal, CAT->getElementType(), IdxListCopy);
 7363 }
// Records: visit any (single) base class first, then fields in order.
 7364 } else if (const auto *RT = dyn_cast<RecordType>(T)) {
 7365 const RecordDecl *Record = RT->getDecl()->getDefinitionOrSelf();
 7366 assert(!Record->isUnion() && "Union types not supported in flat cast.");
 7367
 7368 const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);
 7369
 7371 std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
 7372 ReverseList;
// NOTE(review): the statement for the standard-layout case (orig. 7374) was
// dropped — presumably Record = CXXD->getStandardLayoutBaseWithFields();
// confirm against upstream.
 7373 if (CXXD && CXXD->isStandardLayout())
 7375
 7376 // deal with potential base classes
 7377 if (CXXD && !CXXD->isStandardLayout()) {
 7378 if (CXXD->getNumBases() > 0) {
 7379 assert(CXXD->getNumBases() == 1 &&
 7380 "HLSL doesn't support multiple inheritance.");
 7381 auto Base = CXXD->bases_begin();
 7382 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
 7383 IdxListCopy.push_back(llvm::ConstantInt::get(
 7384 IdxTy, 0)); // base struct should be at index zero
 7385 ReverseList.emplace_back(LVal, Base->getType(), IdxListCopy);
 7386 }
 7387 }
 7388
 7389 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(Record);
 7390
// NOTE(review): 'Align' used by the GEPs below comes from a dropped line
// (orig. 7392) — presumably the alignment of T; confirm against upstream.
 7391 llvm::Type *LLVMT = ConvertTypeForMem(T);
 7393 LValue RLValue;
 7394 bool createdGEP = false;
 7395 for (auto *FD : Record->fields()) {
// Bit-fields cannot be addressed through GEP indices; materialize the
// record lvalue once, then use EmitLValueForField for a bit-field lvalue.
 7396 if (FD->isBitField()) {
 7397 if (FD->isUnnamedBitField())
 7398 continue;
 7399 if (!createdGEP) {
 7400 createdGEP = true;
 7401 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
 7402 LLVMT, Align, "gep");
 7403 RLValue = MakeAddrLValue(GEP, T);
 7404 }
 7405 LValue FieldLVal = EmitLValueForField(RLValue, FD, true);
// An empty index path marks a bitfield whose lvalue is already final.
 7406 ReverseList.push_back({FieldLVal, FD->getType(), {}});
 7407 } else {
 7408 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
 7409 IdxListCopy.push_back(
 7410 llvm::ConstantInt::get(IdxTy, Layout.getLLVMFieldNo(FD)));
 7411 ReverseList.emplace_back(LVal, FD->getType(), IdxListCopy);
 7412 }
 7413 }
 7414
// Reverse before appending so the worklist pops fields in source order.
 7415 std::reverse(ReverseList.begin(), ReverseList.end());
 7416 llvm::append_range(WorkList, ReverseList);
// Vectors: emit one vector-element lvalue per lane.
 7417 } else if (const auto *VT = dyn_cast<VectorType>(T)) {
 7418 llvm::Type *LLVMT = ConvertTypeForMem(T);
 7420 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList, LLVMT,
 7421 Align, "vector.gep");
 7422 LValue Base = MakeAddrLValue(GEP, T);
 7423 for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) {
 7424 llvm::Constant *Idx = llvm::ConstantInt::get(IdxTy, I);
 7425 LValue LV =
 7426 LValue::MakeVectorElt(Base.getAddress(), Idx, VT->getElementType(),
 7427 Base.getBaseInfo(), TBAAAccessInfo());
 7428 AccessList.emplace_back(LV);
 7429 }
 7430 } else if (const auto *MT = dyn_cast<ConstantMatrixType>(T)) {
 7431 // Matrices are represented as flat arrays in memory, but have a vector
 7432 // value type. So we use ConvertMatrixAddress to convert the address from
 7433 // array to vector, and extract elements similar to the vector case above.
 7434 // The matrix elements are iterated over in row-major order regardless of
 7435 // the memory layout of the matrix.
 7436 llvm::Type *LLVMT = ConvertTypeForMem(T);
 7438 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList, LLVMT,
 7439 Align, "matrix.gep");
 7440 LValue Base = MakeAddrLValue(GEP, T)_
 7441 Address MatAddr = MaybeConvertMatrixAddress(Base.getAddress(), *this);
 7442 unsigned NumRows = MT->getNumRows();
 7443 unsigned NumCols = MT->getNumColumns();
// NOTE(review): the right-hand side of this comparison (orig. 7445) was
// dropped — presumably LangOptions::MatrixMemoryLayout::RowMajor or similar.
 7444 bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
 7446 llvm::MatrixBuilder MB(Builder);
 7447 for (unsigned Row = 0; Row < MT->getNumRows(); Row++) {
 7448 for (unsigned Col = 0; Col < MT->getNumColumns(); Col++) {
 7449 llvm::Value *RowIdx = llvm::ConstantInt::get(IdxTy, Row);
 7450 llvm::Value *ColIdx = llvm::ConstantInt::get(IdxTy, Col);
 7451 llvm::Value *Idx = MB.CreateIndex(RowIdx, ColIdx, NumRows, NumCols,
 7452 IsMatrixRowMajor);
 7453 LValue LV =
 7454 LValue::MakeMatrixElt(MatAddr, Idx, MT->getElementType(),
 7455 Base.getBaseInfo(), TBAAAccessInfo());
 7456 AccessList.emplace_back(LV);
 7457 }
 7458 }
 7459 } else { // a scalar/builtin type
 7460 if (!IdxList.empty()) {
 7461 llvm::Type *LLVMT = ConvertTypeForMem(T);
 7463 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
 7464 LLVMT, Align, "gep");
 7465 AccessList.emplace_back(MakeAddrLValue(GEP, T));
 7466 } else // must be a bitfield we already created an lvalue for
 7467 AccessList.emplace_back(LVal);
 7468 }
 7469 }
 7470 }
Defines the clang::ASTContext interface.
#define V(N, I)
This file provides some common utility functions for processing Lambda related AST Constructs.
Defines enum values for all the target-independent builtin functions.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV, bool IsMemberAccess=false)
Definition CGExpr.cpp:3248
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM)
Named Registers are named metadata pointing to the register name which will be read from/written to a...
Definition CGExpr.cpp:3520
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool StrictBool, bool IsBool)
Definition CGExpr.cpp:2066
static llvm::Value * emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc, llvm::Value *Ptr)
Definition CGExpr.cpp:725
static const Expr * isSimpleArrayDecayOperand(const Expr *E)
isSimpleArrayDecayOperand - If the specified expr is a simple decay from an array to pointer,...
Definition CGExpr.cpp:4664
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, int64_t &Offset)
The offset of a field from the beginning of the record.
Definition CGExpr.cpp:4865
static bool hasBPFPreserveStaticOffset(const RecordDecl *D)
Definition CGExpr.cpp:4729
ConstantEmissionKind
Can we constant-emit a load of a reference to a variable of the given type?
Definition CGExpr.cpp:1914
@ CEK_AsReferenceOnly
Definition CGExpr.cpp:1916
@ CEK_AsValueOnly
Definition CGExpr.cpp:1918
@ CEK_None
Definition CGExpr.cpp:1915
@ CEK_AsValueOrReference
Definition CGExpr.cpp:1917
static Address emitRawAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field, bool IsInBounds)
Drill down to the storage of a field without walking into reference types, and without respect for po...
Definition CGExpr.cpp:5693
static bool isConstantEmittableObjectType(QualType type)
Given an object of the given canonical type, can we safely copy a value out of it based on its initia...
Definition CGExpr.cpp:1887
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue)
Definition CGExpr.cpp:3508
static std::optional< LValue > EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand)
Emit the operand of a glvalue conditional operator.
Definition CGExpr.cpp:6004
static CheckRecoverableKind getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal)
Definition CGExpr.cpp:4100
static llvm::Value * emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Type *elemType, llvm::Value *ptr, ArrayRef< llvm::Value * > indices, bool inbounds, bool signedIndices, SourceLocation loc, const llvm::Twine &name="arrayidx")
Definition CGExpr.cpp:4678
SmallVector< llvm::Value *, 8 > RecIndicesTy
Definition CGExpr.cpp:1165
static GlobalDecl getGlobalDeclForDirectCall(const FunctionDecl *FD)
Definition CGExpr.cpp:6568
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, GlobalDecl GD)
Definition CGExpr.cpp:3495
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, CodeGenFunction &CGF, bool IsVector=true)
Definition CGExpr.cpp:2307
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, const PseudoObjectExpr *E, bool forLValue, AggValueSlot slot)
Definition CGExpr.cpp:7264
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, Address &Addr)
Definition CGExpr.cpp:4745
static llvm::StringRef GetUBSanTrapForHandler(SanitizerHandler ID)
Definition CGExpr.cpp:93
static llvm::Value * getArrayIndexingBound(CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel)
If Base is known to point to the start of an array, return the length of that array.
Definition CGExpr.cpp:1017
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, CodeGenFunction &CGF)
Definition CGExpr.cpp:2485
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type)
Definition CGExpr.cpp:1920
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field, bool IsInBounds)
Drill down to the storage of a field without walking into reference types, wrapping the address in an...
Definition CGExpr.cpp:5723
static std::optional< int64_t > getOffsetDifferenceInBits(CodeGenFunction &CGF, const FieldDecl *FD1, const FieldDecl *FD2)
Returns the relative offset difference between FD1 and FD2.
Definition CGExpr.cpp:4896
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD)
Definition CGExpr.cpp:6516
static LValue EmitThreadPrivateVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, llvm::Type *RealVarTy, SourceLocation Loc)
Definition CGExpr.cpp:3345
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, RecIndicesTy &Indices)
Definition CGExpr.cpp:1167
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD)
Definition CGExpr.cpp:6509
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, const Expr *E, const VarDecl *VD)
Definition CGExpr.cpp:3444
static bool hasAnyVptr(const QualType Type, const ASTContext &Context)
Definition CGExpr.cpp:5746
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase)
Given an array base, check whether its member access belongs to a record with preserve_access_index a...
Definition CGExpr.cpp:4758
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T)
Definition CGExpr.cpp:3359
VariableTypeDescriptorKind
Definition CGExpr.cpp:78
@ TK_Float
A floating-point type.
Definition CGExpr.cpp:82
@ TK_Unknown
Any other type. The value representation is unspecified.
Definition CGExpr.cpp:86
@ TK_Integer
An integer type.
Definition CGExpr.cpp:80
@ TK_BitInt
An _BitInt(N) type.
Definition CGExpr.cpp:84
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, bool isInit, CodeGenFunction &CGF)
Definition CGExpr.cpp:2406
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition CGExpr.cpp:1457
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, Address addr, const FieldDecl *field)
Definition CGExpr.cpp:5733
const SanitizerHandlerInfo SanitizerHandlers[]
Definition CGExpr.cpp:4117
static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef< llvm::Value * > FnArgs, SanitizerHandler CheckHandler, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB, bool NoMerge)
Definition CGExpr.cpp:4123
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, LValueBaseInfo &BaseInfo, TBAAAccessInfo &TBAAInfo, QualType BaseTy, QualType ElTy, bool IsLowerBound)
Definition CGExpr.cpp:5235
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
static Address emitAddrOfZeroSizeField(CIRGenFunction &cgf, Address base, const FieldDecl *field)
Get the address of a zero-sized field within a record.
FormatToken * Previous
The previous token in the unwrapped line.
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
Defines the clang::Module class, which describes a module in the source code.
static const SanitizerMask AlwaysRecoverable
static const SanitizerMask Unrecoverable
#define LIST_SANITIZER_CHECKS
SanitizerHandler
llvm::json::Object Object
Defines the SourceManager interface.
static QualType getPointeeType(const MemRegion *R)
a trap message and trap category.
const LValueBase getLValueBase() const
Definition APValue.cpp:1015
bool isLValue() const
Definition APValue.h:490
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:227
SourceManager & getSourceManager()
Definition ASTContext.h:866
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
bool isPFPField(const FieldDecl *Field) const
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
Builtin::Context & BuiltinInfo
Definition ASTContext.h:807
const LangOptions & getLangOpts() const
Definition ASTContext.h:959
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType BoolTy
llvm::DenseMap< const CXXMethodDecl *, CXXCastPath > LambdaCastPaths
For capturing lambdas with an explicit object parameter whose type is derived from the lambda type,...
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getExtVectorType(QualType VectorType, unsigned NumElts) const
Return the unique reference to an extended vector type of the specified element type and size.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
unsigned getTargetAddressSpace(LangAS AS) const
bool isSentinelNullExpr(const Expr *E)
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition Expr.h:7218
Expr * getBase()
Get base of the array section.
Definition Expr.h:7296
Expr * getLength()
Get length of array section.
Definition Expr.h:7306
static QualType getBaseOriginalType(const Expr *Base)
Return original type of the base expression for array section.
Definition Expr.cpp:5384
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:7335
Expr * getLowerBound()
Get lower bound of array section.
Definition Expr.h:7300
bool isOpenACCArraySection() const
Definition Expr.h:7293
SourceLocation getColonLocFirst() const
Definition Expr.h:7327
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2779
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2753
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3777
QualType getElementType() const
Definition TypeBase.h:3789
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
SourceLocation getExprLoc() const
Definition Expr.h:4082
Expr * getRHS() const
Definition Expr.h:4093
static bool isAdditiveOp(Opcode Opc)
Definition Expr.h:4127
Opcode getOpcode() const
Definition Expr.h:4086
A fixed int type of a specified bitwidth.
Definition TypeBase.h:8288
unsigned getNumBits() const
Definition TypeBase.h:8300
bool isPredefinedLibFunction(unsigned ID) const
Determines whether this builtin is a predefined libc/libm function, such as "malloc",...
Definition Builtins.h:320
Represents binding an expression to a temporary.
Definition ExprCXX.h:1497
CXXTemporary * getTemporary()
Definition ExprCXX.h:1515
const Expr * getSubExpr() const
Definition ExprCXX.h:1519
Represents a call to a C++ constructor.
Definition ExprCXX.h:1552
Represents a C++ destructor within a class.
Definition DeclCXX.h:2889
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1372
bool isStandardLayout() const
Determine whether this class is standard-layout per C++ [class]p7.
Definition DeclCXX.h:1225
unsigned getNumBases() const
Retrieves the number of base classes of this class.
Definition DeclCXX.h:602
base_class_iterator bases_begin()
Definition DeclCXX.h:615
bool isDynamicClass() const
Definition DeclCXX.h:574
bool hasDefinition() const
Definition DeclCXX.h:561
const CXXRecordDecl * getStandardLayoutBaseWithFields() const
If this is a standard-layout class or union, any and all data members will be declared in the same ty...
Definition DeclCXX.cpp:562
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:852
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition ExprCXX.h:1072
MSGuidDecl * getGuidDecl() const
Definition ExprCXX.h:1118
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
SourceLocation getBeginLoc() const
Definition Expr.h:3280
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
bool isCoroElideSafe() const
Definition Expr.h:3120
arg_range arguments()
Definition Expr.h:3198
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
path_iterator path_begin()
Definition Expr.h:3749
CastKind getCastKind() const
Definition Expr.h:3723
bool changesVolatileQualification() const
Return.
Definition Expr.h:3813
path_iterator path_end()
Definition Expr.h:3750
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
llvm::MaybeAlign getAsMaybeAlign() const
getAsMaybeAlign - Returns Quantity as a valid llvm::Align or std::nullopt, Beware llvm::MaybeAlign as...
Definition CharUnits.h:194
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
@ None
Trap Messages are omitted.
@ Detailed
Trap Message includes more context (e.g.
@ Strict
In-memory bool values are assumed to be 0 or 1, and any other value is UB.
bool isOptimizedBuild() const
Are we building at -O1 or higher?
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * getBasePointer() const
Definition Address.h:198
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition Address.h:261
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
bool isValid() const
Definition Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
An aggregate value slot.
Definition CGValue.h:551
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition CGValue.h:619
Address getAddress() const
Definition CGValue.h:691
void setExternallyDestructed(bool destructed=true)
Definition CGValue.h:660
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition CGValue.h:649
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:634
RValue asRValue() const
Definition CGValue.h:713
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition CGBuilder.h:315
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition CGBuilder.h:302
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition CGBuilder.h:341
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition CGBuilder.h:251
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition CGBuilder.h:229
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Definition CGBuilder.h:325
Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition CGBuilder.h:423
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition CGBuilder.h:199
virtual llvm::Function * getKernelStub(llvm::GlobalValue *Handle)=0
Get kernel stub by kernel handle.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
Abstract information about a function or function prototype.
Definition CGCall.h:41
const GlobalDecl getCalleeDecl() const
Definition CGCall.h:59
All available information about a concrete callee.
Definition CGCall.h:63
CGCalleeInfo getAbstractInfo() const
Definition CGCall.h:180
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CGCall.h:172
bool isPseudoDestructor() const
Definition CGCall.h:169
static CGCallee forBuiltin(unsigned builtinID, const FunctionDecl *builtinDecl)
Definition CGCall.h:123
unsigned getBuiltinID() const
Definition CGCall.h:164
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
bool isBuiltin() const
Definition CGCall.h:157
const FunctionDecl * getBuiltinDecl() const
Definition CGCall.h:160
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E)
Definition CGCall.h:131
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition CGDebugInfo.h:59
llvm::DIType * getOrCreateStandaloneType(QualType Ty, SourceLocation Loc)
Emit standalone debug info for a type.
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
llvm::DIType * getOrCreateRecordType(QualType Ty, SourceLocation L)
Emit record type's standalone debug info.
CGFunctionInfo - Class to encapsulate the information about a function definition.
RawAddress createBufferMatrixTempAddress(const LValue &LV, SourceLocation Loc, CodeGenFunction &CGF)
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
bool containsFieldDecl(const FieldDecl *FD) const
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr)
Definition CGCall.h:320
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
An object to manage conditionally-evaluated expressions.
llvm::BasicBlock * getStartingBlock() const
Returns a block which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
LValue getReferenceLValue(CodeGenFunction &CGF, const Expr *RefExpr) const
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
Definition CGExpr.cpp:5212
LValue EmitCoawaitLValue(const CoawaitExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2812
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2184
void EmitBoundsCheckImpl(const Expr *ArrayExpr, QualType ArrayBaseType, llvm::Value *IndexVal, QualType IndexType, llvm::Value *BoundsVal, QualType BoundsType, bool Accessed)
Definition CGExpr.cpp:1276
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
Definition CGExpr.cpp:3417
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:591
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
Definition CGExpr.cpp:6801
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
Definition CGExpr.cpp:6109
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3711
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
Definition CGExpr.cpp:1357
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
Definition CGExpr.cpp:7235
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue EmitInitListLValue(const InitListExpr *E)
Definition CGExpr.cpp:5991
bool isUnderlyingBasePointerConstantNull(const Expr *E)
Check whether the underlying base pointer is a constant null.
Definition CGExpr.cpp:5536
void EmitARCInitWeak(Address addr, llvm::Value *value)
i8* @objc_initWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2695
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
Definition CGExpr.cpp:4976
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
Definition CGExpr.cpp:6833
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1198
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
const CastExpr * CurCast
If a cast expression is being visited, this holds the current cast's expression.
llvm::Type * ConvertType(QualType T)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
Definition CGExpr.cpp:6814
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
CGCapturedStmtInfo * CapturedStmtInfo
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2360
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
Definition CGClass.cpp:281
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
Definition CGExpr.cpp:3113
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Definition CGExpr.cpp:3883
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
Definition CGExpr.cpp:5964
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
Definition CGExpr.cpp:7187
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:3034
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:4035
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
Definition CGExpr.cpp:6819
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:7213
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
Emit a CallExpr without considering whether it might be a subclass.
Definition CGExpr.cpp:6498
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
Definition CGExpr.cpp:734
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7336
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
Definition CGExpr.cpp:5659
const LangOptions & getLangOpts() const
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
Definition CGExpr.cpp:4404
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:697
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2577
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
Definition CGExpr.cpp:7164
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
Definition CGExpr.cpp:6867
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:388
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
Definition CGExpr.cpp:6091
void EmitCountedByBoundsChecking(const Expr *ArrayExpr, QualType ArrayType, Address ArrayInst, QualType IndexType, llvm::Value *IndexVal, bool Accessed, bool FlexibleArray)
EmitCountedByBoundsChecking - If the array being accessed has a "counted_by" attribute,...
Definition CGExpr.cpp:4926
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
Definition CGDecl.cpp:2301
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
Definition CGDecl.cpp:788
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:766
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
Definition CGExpr.cpp:3426
RValue EmitLoadOfGlobalRegLValue(LValue LV)
Load of global named registers are always calls to intrinsics.
Definition CGExpr.cpp:2748
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2953
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Upcast
Checking the operand of a cast to a base object.
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
Definition CGExpr.cpp:6649
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
Definition CGDecl.cpp:2274
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty, SourceLocation Loc)
Definition CGExpr.cpp:2100
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
Definition CGExpr.cpp:7341
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3925
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
Definition CGExpr.cpp:6347
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
Definition CGExpr.cpp:976
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ Default
! No language constraints on evaluation order.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
Definition CGExpr.cpp:6859
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
Definition CGExpr.cpp:2502
LValue EmitLValueForField(LValue Base, const FieldDecl *Field, bool IsInBounds=true)
Definition CGExpr.cpp:5765
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:181
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, bool IsInBounds, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Emit the address of a field using a member data pointer.
Definition CGClass.cpp:150
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
Definition CGExpr.cpp:6370
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
Definition CGExpr.cpp:739
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6574
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:256
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6448
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2520
LValue EmitMatrixSingleSubscriptExpr(const MatrixSingleSubscriptExpr *E)
Definition CGExpr.cpp:5196
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
Definition CGExpr.cpp:5274
Address GetAddrOfBlockDecl(const VarDecl *var)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
Definition CGExpr.cpp:4365
RawAddress CreateIRTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateIRTempWithoutCast - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:188
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
Definition CGExpr.cpp:7202
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
void EmitBoundsCheck(const Expr *ArrayExpr, const Expr *ArrayExprBase, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1260
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:237
LValue EmitPredefinedLValue(const PredefinedExpr *E)
Definition CGExpr.cpp:3888
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4183
LValue EmitDeclRefLValue(const DeclRefExpr *E)
Definition CGExpr.cpp:3592
LValue EmitStringLiteralLValue(const StringLiteral *E)
Definition CGExpr.cpp:3878
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6401
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:2042
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1357
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
Definition CGExpr.cpp:1645
LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Same as MakeAddrLValue above except that the pointer is known to be unsigned.
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1321
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5939
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2242
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition CGExpr.cpp:158
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6387
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
Definition CGExpr.cpp:2114
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5359
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E)
Definition CGExpr.cpp:6764
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
Definition CGExpr.cpp:229
LValue EmitVAArgExprLValue(const VAArgExpr *E)
Definition CGExpr.cpp:6796
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside preserved access index region.
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
Definition CGExpr.cpp:297
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
LValue EmitStmtExprLValue(const StmtExpr *E)
Definition CGExpr.cpp:6899
llvm::Value * EmitARCLoadWeakRetained(Address addr)
i8* @objc_loadWeakRetained(i8** addr)
Definition CGObjC.cpp:2675
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block.
Definition CGExpr.cpp:107
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
Definition CGExpr.cpp:6875
Address EmitAddressOfPFPField(Address RecordPtr, const PFPField &Field)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2772
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4618
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2354
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1613
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
void EmitAllocToken(llvm::CallBase *CB, QualType AllocType)
Emit and set additional metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1335
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
LValue EmitCastLValue(const CastExpr *E)
EmitCastLValue - Casts are never lvalues unless that cast is to a reference type.
Definition CGExpr.cpp:6159
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
Definition CGExpr.cpp:520
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
Definition CGExpr.cpp:3436
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3997
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition CGExpr.cpp:307
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:278
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
Definition CGExpr.cpp:5438
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
Definition CGExpr.cpp:1639
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
std::pair< LValue, LValue > EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty)
Definition CGExpr.cpp:6353
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Definition CGExpr.cpp:6847
llvm::Type * ConvertTypeForMem(QualType T)
LValue EmitCallExprLValue(const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6781
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
Definition CGExpr.cpp:2635
llvm::Value * EmitARCLoadWeak(Address addr)
i8* @objc_loadWeak(i8** addr) Essentially objc_autorelease(objc_loadWeakRetained(addr)).
Definition CGObjC.cpp:2668
LValue EmitLValueForLambdaField(const FieldDecl *Field)
Definition CGExpr.cpp:5653
void markStmtMaybeUsed(const Stmt *S)
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
Definition CGExpr.cpp:6853
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7345
LValue EmitCoyieldLValue(const CoyieldExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond if false.
Definition CGExpr.cpp:4317
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
Emits all the code to cause the given temporary to be cleaned up.
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
Definition CGExpr.cpp:3811
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1596
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1677
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:194
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
Definition CGExpr.cpp:3384
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
Definition CGExpr.cpp:6420
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
Definition CGObjC.cpp:2192
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
Definition CGExpr.cpp:6825
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:747
Address EmitExtVectorElementLValue(LValue V)
Generates lvalue for partial ext_vector access.
Definition CGExpr.cpp:2730
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Definition CGExpr.cpp:337
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
RValue EmitLoadOfExtVectorElementLValue(LValue V)
Definition CGExpr.cpp:2672
static bool hasAggregateEvaluationKind(QualType T)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
Definition CGExpr.cpp:1654
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
Definition CGCall.cpp:4796
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:5188
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4603
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4525
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2276
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1252
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4513
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
Definition CGExpr.cpp:6810
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
generateDestroyHelper - Generates a helper function which, when invoked, destroys the given object.
LValue EmitMemberExpr(const MemberExpr *E)
Definition CGExpr.cpp:5543
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1939
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1712
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Store of global named registers are always calls to intrinsics.
Definition CGExpr.cpp:3223
bool isOpaqueValueEmitted(const OpaqueValueExpr *E)
isOpaqueValueEmitted - Return true if the opaque value expression has already been emitted.
Definition CGExpr.cpp:6414
std::pair< llvm::Value *, CGPointerAuthInfo > EmitOrigPointerRValue(const Expr *E)
Retrieve a pointer rvalue and its ptrauth info.
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2683
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
LValue EmitMatrixElementExpr(const MatrixElementExpr *E)
Definition CGExpr.cpp:2335
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
Definition CGExpr.cpp:719
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
Definition CGExpr.cpp:1606
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:643
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1392
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * performAddrSpaceCast(llvm::Constant *Src, llvm::Type *DestTy)
llvm::Constant * getRawFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return a function pointer for a reference to the given function.
Definition CGExpr.cpp:3483
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition CGCXX.cpp:252
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
llvm::Constant * getFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return the ABI-correct function pointer value for a reference to the given function.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type)
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
A specialization of Address that requires the address to be an LLVM Constant.
Definition Address.h:296
llvm::Constant * getPointer() const
Definition Address.h:308
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
llvm::Constant * tryEmitConstantExpr(const ConstantExpr *CE)
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
AlignmentSource getAlignmentSource() const
Definition CGValue.h:172
LValue - This represents an lvalue reference.
Definition CGValue.h:183
llvm::Value * getMatrixRowIdx() const
Definition CGValue.h:412
static LValue MakeMatrixRow(Address Addr, llvm::Value *RowIdx, QualType MatrixTy, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:510
bool isBitField() const
Definition CGValue.h:288
bool isMatrixElt() const
Definition CGValue.h:291
Expr * getBaseIvarExp() const
Definition CGValue.h:344
llvm::Constant * getExtVectorElts() const
Definition CGValue.h:431
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition CGValue.h:500
llvm::Constant * getMatrixRowElts() const
Definition CGValue.h:417
bool isObjCStrong() const
Definition CGValue.h:336
bool isMatrixRowSwizzle() const
Definition CGValue.h:293
bool isGlobalObjCRef() const
Definition CGValue.h:318
bool isVectorElt() const
Definition CGValue.h:287
bool isSimple() const
Definition CGValue.h:286
bool isVolatileQualified() const
Definition CGValue.h:297
RValue asAggregateRValue() const
Definition CGValue.h:545
llvm::Value * getPointer(CodeGenFunction &CGF) const
llvm::Value * getMatrixIdx() const
Definition CGValue.h:407
llvm::Value * getGlobalReg() const
Definition CGValue.h:452
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:454
bool isVolatile() const
Definition CGValue.h:340
const Qualifiers & getQuals() const
Definition CGValue.h:350
bool isGlobalReg() const
Definition CGValue.h:290
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:474
bool isObjCWeak() const
Definition CGValue.h:333
Address getAddress() const
Definition CGValue.h:373
unsigned getVRQualifiers() const
Definition CGValue.h:299
bool isMatrixRow() const
Definition CGValue.h:292
LValue setKnownNonNull()
Definition CGValue.h:362
bool isNonGC() const
Definition CGValue.h:315
bool isExtVectorElt() const
Definition CGValue.h:289
llvm::Value * getVectorIdx() const
Definition CGValue.h:394
void setNontemporal(bool Value)
Definition CGValue.h:331
LValueBaseInfo getBaseInfo() const
Definition CGValue.h:358
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition CGValue.h:327
QualType getType() const
Definition CGValue.h:303
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:446
bool isThreadLocalRef() const
Definition CGValue.h:321
KnownNonNull_t isKnownNonNull() const
Definition CGValue.h:361
TBAAAccessInfo getTBAAInfo() const
Definition CGValue.h:347
void setNonGC(bool Value)
Definition CGValue.h:316
static LValue MakeMatrixRowSwizzle(Address MatAddr, llvm::Value *RowIdx, llvm::Constant *Cols, QualType MatrixTy, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:521
Address getVectorAddress() const
Definition CGValue.h:382
bool isNontemporal() const
Definition CGValue.h:330
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition CGValue.h:490
bool isObjCIvar() const
Definition CGValue.h:309
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:464
void setAddress(Address address)
Definition CGValue.h:375
Address getExtVectorAddress() const
Definition CGValue.h:423
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:535
Address getMatrixAddress() const
Definition CGValue.h:399
Address getBitFieldAddress() const
Definition CGValue.h:437
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:109
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
An abstract representation of an aligned address.
Definition Address.h:42
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition Address.h:93
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:77
llvm::Value * getPointer() const
Definition Address.h:66
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition Address.h:83
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3330
QualType getElementType() const
Definition TypeBase.h:3340
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
bool isFileScope() const
Definition Expr.h:3640
const Expr * getInitializer() const
Definition Expr.h:3636
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition Expr.h:1085
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4442
unsigned getNumColumns() const
Returns the number of columns in the matrix.
Definition TypeBase.h:4461
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition TypeBase.h:4458
DeclContext * getLexicalParent()
getLexicalParent - Returns the containing lexical DeclContext.
Definition DeclBase.h:2138
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1477
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:488
ValueDecl * getDecl()
Definition Expr.h:1341
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1471
SourceLocation getLocation() const
Definition Expr.h:1349
T * getAttr() const
Definition DeclBase.h:581
SourceLocation getLocation() const
Definition DeclBase.h:447
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition DeclBase.cpp:576
DeclContext * getDeclContext()
Definition DeclBase.h:456
bool hasAttr() const
Definition DeclBase.h:585
const Expr * getBase() const
Definition Expr.h:6580
ExplicitCastExpr - An explicit cast written in the source code.
Definition Expr.h:3931
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:84
bool isGLValue() const
Definition Expr.h:287
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3117
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:447
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3090
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known ...
bool isPRValue() const
Definition Expr.h:285
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition Expr.h:284
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1546
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3688
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3070
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:479
QualType getType() const
Definition Expr.h:144
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/ write barrier.
Definition Expr.cpp:3001
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6609
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4436
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4549
ExtVectorType - Extended vector type.
Definition TypeBase.h:4322
Represents a member of a struct/union/class.
Definition Decl.h:3175
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3278
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3260
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3411
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4821
const Expr * getSubExpr() const
Definition Expr.h:1065
Represents a function declaration or definition.
Definition Decl.h:2015
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3728
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5362
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4558
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition Expr.h:7396
const OpaqueValueExpr * getCastedTemporary() const
Definition Expr.h:7447
const OpaqueValueExpr * getOpaqueArgLValue() const
Definition Expr.h:7428
bool isInOut() const
returns true if the parameter is inout and false if the parameter is out.
Definition Expr.h:7455
const Expr * getWritebackCast() const
Definition Expr.h:7442
const Expr * getArgLValue() const
Return the l-value expression that was written as the argument in source.
Definition Expr.h:7437
Describes an C or C++ initializer list.
Definition Expr.h:5302
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2462
const Expr * getInit(unsigned Init) const
Definition Expr.h:5356
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4920
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4945
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4970
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4581
MatrixSingleSubscriptExpr - Matrix single subscript expression for the MatrixType extension when you ...
Definition Expr.h:2798
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2838
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition Expr.h:2868
bool isIncomplete() const
Definition Expr.h:2888
QualType getElementType() const
Returns type of the elements being stored in the matrix.
Definition TypeBase.h:4406
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3450
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3591
Expr * getBase() const
Definition Expr.h:3444
bool isArrow() const
Definition Expr.h:3551
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3562
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3708
bool isObjCBOOLType(QualType T) const
Returns true if the given type is the Objective-C BOOL type.
Definition NSAPI.cpp:481
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
ObjCEncodeExpr, used for @encode in Objective-C.
Definition ExprObjC.h:441
Represents an ObjC class declaration.
Definition DeclObjC.h:1154
ObjCIvarDecl - Represents an ObjC instance variable.
Definition DeclObjC.h:1952
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition ExprObjC.h:580
ObjCIvarDecl * getDecl()
Definition ExprObjC.h:610
bool isArrow() const
Definition ExprObjC.h:618
const Expr * getBase() const
Definition ExprObjC.h:614
An expression that sends a message to the given Objective-C object or class.
Definition ExprObjC.h:971
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1395
QualType getReturnType() const
Definition DeclObjC.h:329
ObjCSelectorExpr used for @selector in Objective-C.
Definition ExprObjC.h:486
Selector getSelector() const
Definition ExprObjC.h:500
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
bool isUnique() const
Definition Expr.h:1239
const Expr * getSubExpr() const
Definition Expr.h:2202
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3383
QualType getPointeeType() const
Definition TypeBase.h:3393
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2008
StringRef getIdentKindName() const
Definition Expr.h:2065
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2043
StringLiteral * getFunctionName()
Definition Expr.h:2052
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition Expr.h:6803
semantics_iterator semantics_end()
Definition Expr.h:6868
semantics_iterator semantics_begin()
Definition Expr.h:6864
const Expr *const * const_semantics_iterator
Definition Expr.h:6863
Expr * getResultExpr()
Return the result-bearing expression, or null if there is none.
Definition Expr.h:6851
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8520
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1464
QualType withoutLocalFastQualifiers() const
Definition TypeBase.h:1225
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8562
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8476
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1449
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8621
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition TypeBase.h:8530
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1190
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1556
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition TypeBase.h:1036
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
bool hasConst() const
Definition TypeBase.h:457
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void removeObjCGCAttr()
Definition TypeBase.h:523
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
void removePointerAuth()
Definition TypeBase.h:610
void setAddressSpace(LangAS space)
Definition TypeBase.h:591
bool hasVolatile() const
Definition TypeBase.h:467
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:603
ObjCLifetime getObjCLifetime() const
Definition TypeBase.h:545
Represents a struct/union/class.
Definition Decl.h:4342
field_range fields() const
Definition Decl.h:4545
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition Decl.h:4526
bool isAnonymousStructOrUnion() const
Whether this is an anonymous struct or union.
Definition Decl.h:4394
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location that the given SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition Expr.h:4598
StmtClass getStmtClass() const
Definition Stmt.h:1499
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
bool isUnion() const
Definition Decl.h:3943
Exposes information about the current target.
Definition TargetInfo.h:227
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
virtual StringRef getABI() const
Get the ABI currently in use.
The base class of the type hierarchy.
Definition TypeBase.h:1871
bool isBlockPointerType() const
Definition TypeBase.h:8693
bool isVoidType() const
Definition TypeBase.h:9039
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2289
bool isPackedVectorBoolType(const ASTContext &ctx) const
Definition Type.cpp:455
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition Type.cpp:2000
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9342
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8776
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isArrayType() const
Definition TypeBase.h:8772
bool isFunctionPointerType() const
Definition TypeBase.h:8740
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2410
bool isConstantMatrixType() const
Definition TypeBase.h:8840
bool isPointerType() const
Definition TypeBase.h:8673
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:9083
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9333
bool isReferenceType() const
Definition TypeBase.h:8697
bool isEnumeralType() const
Definition TypeBase.h:8804
bool isVariableArrayType() const
Definition TypeBase.h:8784
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:789
bool isExtVectorBoolType() const
Definition TypeBase.h:8820
bool isBitIntType() const
Definition TypeBase.h:8948
bool isConstantMatrixBoolType() const
Definition TypeBase.h:8826
bool isAnyComplexType() const
Definition TypeBase.h:8808
bool hasPointeeToCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8725
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9219
bool isAtomicType() const
Definition TypeBase.h:8865
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2855
bool isObjectType() const
Determine whether this type is an object type.
Definition TypeBase.h:2563
bool isHLSLResourceRecord() const
Definition Type.cpp:5483
EnumDecl * getAsEnumDecl() const
Retrieves the EnumDecl this type refers to.
Definition Type.h:53
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2515
bool isFunctionType() const
Definition TypeBase.h:8669
bool isObjCObjectPointerType() const
Definition TypeBase.h:8852
bool isVectorType() const
Definition TypeBase.h:8812
bool isAnyPointerType() const
Definition TypeBase.h:8681
bool isSubscriptableVectorType() const
Definition TypeBase.h:8832
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9266
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition Type.cpp:690
bool isRecordType() const
Definition TypeBase.h:8800
bool isHLSLResourceRecordArray() const
Definition Type.cpp:5487
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2432
bool isCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8719
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
SourceLocation getExprLoc() const
Definition Expr.h:2371
Expr * getSubExpr() const
Definition Expr.h:2288
Opcode getOpcode() const
Definition Expr.h:2283
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a variable declaration or definition.
Definition Decl.h:926
TLSKind getTLSKind() const
Definition Decl.cpp:2145
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2343
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1184
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:952
@ TLS_None
Not a TLS variable.
Definition Decl.h:946
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4021
Represents a GCC generic vector type.
Definition TypeBase.h:4230
unsigned getNumElements() const
Definition TypeBase.h:4245
#define INT_MIN
Definition limits.h:55
Definition SPIR.cpp:35
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition CGValue.h:142
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD)
isEmptyFieldForLayout - Return true iff the field is "empty", that is, either a zero-width bit-field ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
@ ARCImpreciseLifetime
Definition CGValue.h:137
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition CGValue.h:160
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
std::optional< llvm::AllocTokenMetadata > getAllocTokenMetadata(QualType T, const ASTContext &Ctx)
Get the information required for construction of an allocation token ID.
QualType inferPossibleType(const CallExpr *E, const ASTContext &Ctx, const CastExpr *CastE)
Infer the possible allocated type from an allocation call expression.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition Specifiers.h:155
@ SC_Register
Definition Specifiers.h:258
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition Specifiers.h:340
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:343
@ SD_Static
Static storage duration.
Definition Specifiers.h:344
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:341
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:342
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:345
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
LangAS
Defines the address space values used by the address space qualifier of QualType.
llvm::cl::opt< bool > ClSanitizeGuardChecks
SmallVector< CXXBaseSpecifier *, 4 > CXXCastPath
A simple array of base specifiers.
Definition ASTContext.h:151
U cast(CodeGen::Address addr)
Definition Address.h:327
LangAS getLangASFromTargetAS(unsigned TargetAS)
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
Definition TypeBase.h:5966
bool isLambdaMethod(const DeclContext *DC)
Definition ASTLambda.h:39
@ Other
Other implicit parameter.
Definition Decl.h:1761
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:178
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:181
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
uint64_t Offset
Offset - The byte offset of the final access within the base one.
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
uint64_t Size
Size - The size of access, in bytes.
llvm::MDNode * BaseType
BaseType - The base/leading access type.
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:615
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition Expr.h:68