clang 23.0.0git
CGExpr.cpp
Go to the documentation of this file.
1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGObjCRuntime.h"
21#include "CGOpenMPRuntime.h"
22#include "CGRecordLayout.h"
23#include "CodeGenFunction.h"
24#include "CodeGenModule.h"
25#include "CodeGenPGO.h"
26#include "ConstantEmitter.h"
27#include "TargetInfo.h"
29#include "clang/AST/ASTLambda.h"
30#include "clang/AST/Attr.h"
31#include "clang/AST/DeclObjC.h"
32#include "clang/AST/Expr.h"
34#include "clang/AST/NSAPI.h"
39#include "clang/Basic/Module.h"
41#include "llvm/ADT/STLExtras.h"
42#include "llvm/ADT/ScopeExit.h"
43#include "llvm/ADT/StringExtras.h"
44#include "llvm/IR/Constants.h"
45#include "llvm/IR/DataLayout.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/Support/ConvertUTF.h"
51#include "llvm/Support/Endian.h"
52#include "llvm/Support/MathExtras.h"
53#include "llvm/Support/Path.h"
54#include "llvm/Support/xxhash.h"
55#include "llvm/Transforms/Utils/SanitizerStats.h"
56
57#include <numeric>
58#include <optional>
59#include <string>
60
61using namespace clang;
62using namespace CodeGen;
63
// Debug/tuning flag: when enabled, emitted UBSan checks are wrapped in an
// llvm.allow.ubsan.check() intrinsic so the optimizer may elide them.
64namespace clang {
65// TODO: consider deprecating ClSanitizeGuardChecks; functionality is subsumed
66// by -fsanitize-skip-hot-cutoff
67llvm::cl::opt<bool> ClSanitizeGuardChecks(
68 "ubsan-guard-checks", llvm::cl::Optional,
69 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
70
71} // namespace clang
72
73//===--------------------------------------------------------------------===//
74// Defines for metadata
75//===--------------------------------------------------------------------===//
76
// NOTE(review): the enum header line is missing from this extraction; the
// enumerators below encode the ubsan TypeDescriptor "type kind" values and
// must stay numerically identical to those in the ubsan runtime library.
77// Those values are crucial to be the SAME as in ubsan runtime library.
79 /// An integer type.
80 TK_Integer = 0x0000,
81 /// A floating-point type.
82 TK_Float = 0x0001,
83 /// An _BitInt(N) type.
84 TK_BitInt = 0x0002,
85 /// Any other type. The value representation is unspecified.
86 TK_Unknown = 0xffff
87};
88
89//===--------------------------------------------------------------------===//
90// Miscellaneous Helper Methods
91//===--------------------------------------------------------------------===//
92
// Map a SanitizerHandler enumerator to its human-readable trap message.
// NOTE(review): the X-macro include line that expands SANITIZER_CHECK (one
// case per handler) is missing from this extraction — confirm against the
// upstream file before editing.
93static llvm::StringRef GetUBSanTrapForHandler(SanitizerHandler ID) {
94 switch (ID) {
95#define SANITIZER_CHECK(Enum, Name, Version, Msg) \
96 case SanitizerHandler::Enum: \
97 return Msg;
99#undef SANITIZER_CHECK
100 }
101 llvm_unreachable("unhandled switch case");
102}
103
104/// CreateTempAlloca - This creates an alloca and inserts it into the entry
105/// block.
// NOTE(review): the function signature lines are missing from this extraction
// (presumably CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align, ...)
// given the parameters used below — confirm against upstream).
108 const Twine &Name,
109 llvm::Value *ArraySize) {
// "EmitLogicalPointer"/"CreateStructuredAlloca" are not upstream LLVM APIs;
// presumably a downstream extension — verify before relying on them.
110 if (getLangOpts().EmitLogicalPointer) {
111 auto Alloca = Builder.CreateStructuredAlloca(Ty, Name);
112 return RawAddress(Alloca, Ty, Align, KnownNonNull);
113 }
114
115 auto *Alloca = CreateTempAlloca(Ty, Name, ArraySize);
116 Alloca->setAlignment(Align.getAsAlign());
117 return RawAddress(Alloca, Ty, Align, KnownNonNull);
118}
119
// Cast an alloca'd address from the target's alloca address space to the
// address space implied by DestLangAS, when the two differ.
// NOTE(review): line 142 (the trailing argument(s) of the final RawAddress
// constructor call) is missing from this extraction.
120RawAddress CodeGenFunction::MaybeCastStackAddressSpace(RawAddress Alloca,
121 LangAS DestLangAS,
122 llvm::Value *ArraySize) {
123
124 llvm::Value *V = Alloca.getPointer();
125 // Alloca always returns a pointer in alloca address space, which may
126 // be different from the type defined by the language. For example,
127 // in C++ the auto variables are in the default address space. Therefore
128 // cast alloca to the default address space when necessary.
129
130 unsigned DestAddrSpace = getContext().getTargetAddressSpace(DestLangAS);
131 if (DestAddrSpace != Alloca.getAddressSpace()) {
132 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
133 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
134 // otherwise alloca is inserted at the current insertion point of the
135 // builder.
136 if (!ArraySize)
137 Builder.SetInsertPoint(getPostAllocaInsertPoint());
138 V = performAddrSpaceCast(V, Builder.getPtrTy(DestAddrSpace));
139 }
140
141 return RawAddress(V, Alloca.getElementType(), Alloca.getAlignment(),
143}
144
// Create a temp alloca and, if needed, cast it to the destination language
// address space; optionally reports the raw (un-cast) alloca via AllocaAddr.
// NOTE(review): the signature line is missing from this extraction.
146 CharUnits Align, const Twine &Name,
147 llvm::Value *ArraySize,
148 RawAddress *AllocaAddr) {
149 RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
150 if (AllocaAddr)
151 *AllocaAddr = Alloca;
152 return MaybeCastStackAddressSpace(Alloca, DestLangAS, ArraySize);
153}
154
155/// CreateTempAlloca - This creates an alloca and inserts it into the entry
156/// block if \p ArraySize is nullptr, otherwise inserts it at the current
157/// insertion point of the builder.
158llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
159 const Twine &Name,
160 llvm::Value *ArraySize) {
161 llvm::AllocaInst *Alloca;
162 if (ArraySize)
163 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
164 else
165 Alloca =
166 new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
167 ArraySize, Name, AllocaInsertPt->getIterator());
168 if (SanOpts.Mask & SanitizerKind::Address) {
169 Alloca->addAnnotationMetadata({"alloca_name_altered", Name.str()});
170 }
171 if (Allocas) {
172 Allocas->Add(Alloca);
173 }
174 return Alloca;
175}
176
177/// CreateDefaultAlignTempAlloca - This creates an alloca with the
178/// default alignment of the corresponding LLVM type, which is *not*
179/// guaranteed to be related in any way to the expected alignment of
180/// an AST type that might have been lowered to Ty.
// NOTE(review): the signature lines of this function and of each of the
// CreateMemTemp / CreateMemTempWithoutCast overloads below are missing from
// this extraction; the bodies are kept verbatim.
182 const Twine &Name) {
183 CharUnits Align =
184 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
185 return CreateTempAlloca(Ty, LangAS::Default, Align, Name);
186}
187
// Presumably CreateMemTempWithoutCast(QualType, CharUnits, Twine) — note the
// body calls CreateTempAllocaWithoutCast; confirm against upstream.
189 const Twine &Name) {
191 return CreateTempAllocaWithoutCast(ConvertType(Ty), Align, Name, nullptr);
192}
193
// CreateMemTemp(QualType, Twine, RawAddress*): delegates using the natural
// alignment of the AST type.
195 RawAddress *Alloca) {
196 // FIXME: Should we prefer the preferred type alignment here?
197 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
198}
199
// CreateMemTemp(QualType, CharUnits, Twine, RawAddress*): for constant matrix
// types the [N x elt] (or HLSL [N x vector]) storage is re-viewed as a flat
// fixed vector so loads/stores use the vector type.
201 const Twine &Name,
202 RawAddress *Alloca) {
205 /*ArraySize=*/nullptr, Alloca);
206
207 if (Ty->isConstantMatrixType()) {
208 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
209 auto *ArrayElementTy = ArrayTy->getElementType();
210 auto ArrayElements = ArrayTy->getNumElements();
211 if (getContext().getLangOpts().HLSL) {
212 auto *VectorTy = cast<llvm::FixedVectorType>(ArrayElementTy);
213 ArrayElementTy = VectorTy->getElementType();
214 ArrayElements *= VectorTy->getNumElements();
215 }
216 auto *VectorTy = llvm::FixedVectorType::get(ArrayElementTy, ArrayElements);
217
218 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
220 }
221 return Result;
222}
223
// CreateMemTempWithoutCast(QualType, CharUnits, Twine).
225 CharUnits Align,
226 const Twine &Name) {
227 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
228}
229
// CreateMemTempWithoutCast(QualType, Twine): uses the type's natural
// alignment.
231 const Twine &Name) {
232 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
233 Name);
234}
235
236/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
237/// expression and compare the result against zero, returning an Int1Ty value.
// NOTE(review): the signature line and the first line of the final complex
// conversion call are missing from this extraction.
239 PGO->setCurrentStmt(E);
// Member pointers need an ABI-specific "is not null" test rather than a
// scalar conversion.
240 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
241 llvm::Value *MemPtr = EmitScalarExpr(E);
242 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
243 }
244
245 QualType BoolTy = getContext().BoolTy;
246 SourceLocation Loc = E->getExprLoc();
247 CGFPOptionsRAII FPOptsRAII(*this, E);
248 if (!E->getType()->isAnyComplexType())
249 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
250
252 Loc);
253}
254
255/// EmitIgnoredExpr - Emit code to compute the specified expression,
256/// ignoring the result.
// NOTE(review): the signature line and the dyn_cast argument line are missing
// from this extraction.
258 if (E->isPRValue())
259 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
260
261 // if this is a bitfield-resulting conditional operator, we can special case
262 // emit this. The normal 'EmitLValue' version of this is particularly
263 // difficult to codegen for, since creating a single "LValue" for two
264 // different sized arguments here is not particularly doable.
265 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
267 if (CondOp->getObjectKind() == OK_BitField)
268 return EmitIgnoredConditionalOperator(CondOp);
269 }
270
271 // Just emit it as an l-value and drop the result.
272 EmitLValue(E);
273}
274
275/// EmitAnyExpr - Emit code to compute the specified expression which
276/// can have any type. The result is returned as an RValue struct.
277/// If this is an aggregate expression, AggSlot indicates where the
278/// result should be returned.
// NOTE(review): the first signature line is missing from this extraction.
280 AggValueSlot aggSlot,
281 bool ignoreResult) {
// Dispatch on the evaluation kind: scalars and complex values are computed
// directly; aggregates are evaluated into a slot (a fresh temp when the
// caller provided none and the result is actually needed).
282 switch (getEvaluationKind(E->getType())) {
283 case TEK_Scalar:
284 return RValue::get(EmitScalarExpr(E, ignoreResult));
285 case TEK_Complex:
286 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
287 case TEK_Aggregate:
288 if (!ignoreResult && aggSlot.isIgnored())
289 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
290 EmitAggExpr(E, aggSlot);
291 return aggSlot.asRValue();
292 }
293 llvm_unreachable("bad evaluation kind");
294}
295
296/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
297/// always be accessible even if no aggregate location is provided.
// NOTE(review): the signature line and the AggValueSlot declaration line are
// missing from this extraction.
300
302 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
303 return EmitAnyExpr(E, AggSlot);
304}
305
306/// EmitAnyExprToMem - Evaluate an expression into a given memory
307/// location.
// NOTE(review): several lines (signature, complex-emission call, AggValueSlot
// flag arguments, scalar store call) are missing from this extraction.
309 Address Location,
310 Qualifiers Quals,
311 bool IsInit) {
312 // FIXME: This function should take an LValue as an argument.
313 switch (getEvaluationKind(E->getType())) {
314 case TEK_Complex:
316 /*isInit*/ false);
317 return;
318
319 case TEK_Aggregate: {
320 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
325 return;
326 }
327
328 case TEK_Scalar: {
329 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
330 LValue LV = MakeAddrLValue(Location, E->getType());
332 return;
333 }
334 }
335 llvm_unreachable("bad evaluation kind");
336}
337
// Initialize the storage denoted by LV with the value of E, dispatching on
// the evaluation kind of LV's type.
// NOTE(review): the first signature line and the aggregate-slot argument
// lines are missing from this extraction.
339 const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
340 QualType Type = LV.getType();
341 switch (getEvaluationKind(Type)) {
342 case TEK_Complex:
343 EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
344 return;
345 case TEK_Aggregate:
349 AggValueSlot::MayOverlap, IsZeroed));
350 return;
351 case TEK_Scalar:
352 if (LV.isSimple())
353 EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
354 else
356 return;
357 }
358 llvm_unreachable("bad evaluation kind");
}
360
// pushTemporaryCleanup - Register the cleanup (destructor call or ARC
// release) required for a materialized reference temporary, keyed off its
// ObjC lifetime qualifier and storage duration.
// NOTE(review): many lines are missing from this extraction (the signature,
// several case labels such as the OCL_* lifetimes and SD_FullExpression, the
// destroyer/cleanup-kind selection, and some call arguments). Code below is
// kept verbatim; confirm against the upstream file before editing.
361static void
363 const Expr *E, Address ReferenceTemporary) {
364 // Objective-C++ ARC:
365 // If we are binding a reference to a temporary that has ownership, we
366 // need to perform retain/release operations on the temporary.
367 //
368 // FIXME: This should be looking at E, not M.
369 if (auto Lifetime = M->getType().getObjCLifetime()) {
370 switch (Lifetime) {
373 // Carry on to normal cleanup handling.
374 break;
375
377 // Nothing to do; cleaned up by an autorelease pool.
378 return;
379
382 switch (StorageDuration Duration = M->getStorageDuration()) {
383 case SD_Static:
384 // Note: we intentionally do not register a cleanup to release
385 // the object on program termination.
386 return;
387
388 case SD_Thread:
389 // FIXME: We should probably register a cleanup in this case.
390 return;
391
392 case SD_Automatic:
396 if (Lifetime == Qualifiers::OCL_Strong) {
397 const ValueDecl *VD = M->getExtendingDecl();
398 bool Precise = isa_and_nonnull<VarDecl>(VD) &&
399 VD->hasAttr<ObjCPreciseLifetimeAttr>();
403 } else {
404 // __weak objects always get EH cleanups; otherwise, exceptions
405 // could cause really nasty crashes instead of mere leaks.
408 }
409 if (Duration == SD_FullExpression)
410 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
411 M->getType(), *Destroy,
413 else
414 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
415 M->getType(),
416 *Destroy, CleanupKind & EHCleanup);
417 return;
418
419 case SD_Dynamic:
420 llvm_unreachable("temporary cannot have dynamic storage duration");
421 }
422 llvm_unreachable("unknown storage duration");
423 }
424 }
425
// Non-ARC path: register the C++ destructor appropriate for the temporary's
// storage duration (static/thread temporaries use an atexit-style callback).
427 if (DK != QualType::DK_none) {
428 switch (M->getStorageDuration()) {
429 case SD_Static:
430 case SD_Thread: {
431 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
432 if (const auto *ClassDecl =
434 ClassDecl && !ClassDecl->hasTrivialDestructor())
435 // Get the destructor for the reference temporary.
436 ReferenceTemporaryDtor = ClassDecl->getDestructor();
437
438 if (!ReferenceTemporaryDtor)
439 return;
440
441 llvm::FunctionCallee CleanupFn;
442 llvm::Constant *CleanupArg;
443 if (E->getType()->isArrayType()) {
445 ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject,
446 CGF.getLangOpts().Exceptions,
447 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
448 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
449 } else {
450 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
451 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
452 CleanupArg =
453 cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
454 }
456 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
457 } break;
459 CGF.pushDestroy(DK, ReferenceTemporary, E->getType());
460 break;
461 case SD_Automatic:
462 CGF.pushLifetimeExtendedDestroy(DK, ReferenceTemporary, E->getType());
463 break;
464 case SD_Dynamic:
465 llvm_unreachable("temporary cannot have dynamic storage duration");
466 }
467 }
468}
469
// createReferenceTemporary - Allocate storage for a materialized temporary.
// Automatic/full-expression temporaries may be promoted to private constant
// globals when their value is a constant; static/thread temporaries come from
// the module-level global-temporary map.
// NOTE(review): the signature lines, the SD_FullExpression case label, and
// parts of the address-space-cast expression are missing from this
// extraction; code below is kept verbatim.
472 const Expr *Inner,
473 RawAddress *Alloca = nullptr) {
474 switch (M->getStorageDuration()) {
476 case SD_Automatic: {
477 // If we have a constant temporary array or record try to promote it into a
478 // constant global under the same rules a normal constant would've been
479 // promoted. This is easier on the optimizer and generally emits fewer
480 // instructions.
481 QualType Ty = Inner->getType();
482 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
483 (Ty->isArrayType() || Ty->isRecordType()) &&
484 Ty.isConstantStorage(CGF.getContext(), true, false))
485 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
486 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
487 auto *GV = new llvm::GlobalVariable(
488 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
489 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
490 llvm::GlobalValue::NotThreadLocal,
492 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
493 GV->setAlignment(alignment.getAsAlign());
494 llvm::Constant *C = GV;
495 if (AS != LangAS::Default)
497 GV, llvm::PointerType::get(
498 CGF.getLLVMContext(),
500 // FIXME: Should we put the new global into a COMDAT?
501 return RawAddress(C, GV->getValueType(), alignment);
502 }
503 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
504 }
505 case SD_Thread:
506 case SD_Static:
507 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
508
509 case SD_Dynamic:
510 llvm_unreachable("temporary can't have dynamic storage duration");
511 }
512 llvm_unreachable("unknown storage duration");
513}
514
515/// Helper method to check if the underlying ABI is AAPCS
516static bool isAAPCS(const TargetInfo &TargetInfo) {
517 return TargetInfo.getABI().starts_with("aapcs");
518}
519
// EmitMaterializeTemporaryExpr - Materialize a temporary for a reference
// binding: allocate (or reuse a promoted-global) storage, run any comma LHSs,
// initialize the storage, register its cleanup, and finally apply subobject
// adjustments (derived-to-base, field, member-pointer deref) to reach the
// bound subobject.
// NOTE(review): numerous lines are missing from this extraction (the
// signature, several call arguments, case labels in the adjustment switch,
// and the final MakeAddrLValue). Code below is kept verbatim; confirm
// against the upstream file before editing.
522 const Expr *E = M->getSubExpr();
523
524 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
525 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
526 "Reference should never be pseudo-strong!");
527
528 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
529 // as that will cause the lifetime adjustment to be lost for ARC
530 auto ownership = M->getType().getObjCLifetime();
531 if (ownership != Qualifiers::OCL_None &&
532 ownership != Qualifiers::OCL_ExplicitNone) {
534 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
535 llvm::Type *Ty = ConvertTypeForMem(E->getType());
536 Object = Object.withElementType(Ty);
537
538 // createReferenceTemporary will promote the temporary to a global with a
539 // constant initializer if it can. It can only do this to a value of
540 // ARC-manageable type if the value is global and therefore "immune" to
541 // ref-counting operations. Therefore we have no need to emit either a
542 // dynamic initialization or a cleanup and we can just return the address
543 // of the temporary.
544 if (Var->hasInitializer())
546
547 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
548 }
549 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
551
552 switch (getEvaluationKind(E->getType())) {
553 default: llvm_unreachable("expected scalar or aggregate expression");
554 case TEK_Scalar:
555 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
556 break;
557 case TEK_Aggregate: {
559 E->getType().getQualifiers(),
564 break;
565 }
566 }
567
568 pushTemporaryCleanup(*this, M, E, Object);
569 return RefTempDst;
570 }
571
// Peel off comma LHSs and subobject adjustments before creating the storage.
574 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
575
576 for (const auto &Ignored : CommaLHSs)
577 EmitIgnoredExpr(Ignored);
578
579 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
580 if (opaque->getType()->isRecordType()) {
581 assert(Adjustments.empty());
582 return EmitOpaqueValueLValue(opaque);
583 }
584 }
585
586 // Create and initialize the reference temporary.
587 RawAddress Alloca = Address::invalid();
588 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
589 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
590 Object.getPointer()->stripPointerCasts())) {
591 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
592 Object = Object.withElementType(TemporaryType);
593 // If the temporary is a global and has a constant initializer or is a
594 // constant temporary that we promoted to a global, we may have already
595 // initialized it.
596 if (!Var->hasInitializer()) {
597 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
599 if (RefType.getPointerAuth()) {
600 // Use the qualifier of the reference temporary to sign the pointer.
601 LValue LV = MakeRawAddrLValue(Object.getPointer(), RefType,
602 Object.getAlignment());
603 EmitScalarInit(E, M->getExtendingDecl(), LV, false);
604 } else {
605 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
606 }
607 }
608 } else {
609 switch (M->getStorageDuration()) {
610 case SD_Automatic:
611 if (EmitLifetimeStart(Alloca.getPointer())) {
613 Alloca);
614 }
615 break;
616
617 case SD_FullExpression: {
618 if (!ShouldEmitLifetimeMarkers)
619 break;
620
621 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
622 // marker. Instead, start the lifetime of a conditional temporary earlier
623 // so that it's unconditional. Don't do this with sanitizers which need
624 // more precise lifetime marks. However when inside an "await.suspend"
625 // block, we should always avoid conditional cleanup because it creates
626 // boolean marker that lives across await_suspend, which can destroy coro
627 // frame.
628 ConditionalEvaluation *OldConditional = nullptr;
629 CGBuilderTy::InsertPoint OldIP;
631 ((!SanOpts.has(SanitizerKind::HWAddress) &&
632 !SanOpts.has(SanitizerKind::Memory) &&
633 !SanOpts.has(SanitizerKind::MemtagStack) &&
634 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
635 inSuspendBlock())) {
636 OldConditional = OutermostConditional;
637 OutermostConditional = nullptr;
638
639 OldIP = Builder.saveIP();
640 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
641 Builder.restoreIP(CGBuilderTy::InsertPoint(
642 Block, llvm::BasicBlock::iterator(Block->back())));
643 }
644
645 if (EmitLifetimeStart(Alloca.getPointer())) {
647 }
648
649 if (OldConditional) {
650 OutermostConditional = OldConditional;
651 Builder.restoreIP(OldIP);
652 }
653 break;
654 }
655
656 default:
657 break;
658 }
659 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
660 }
661 pushTemporaryCleanup(*this, M, E, Object);
662
663 // Perform derived-to-base casts and/or field accesses, to get from the
664 // temporary object we created (and, potentially, for which we extended
665 // the lifetime) to the subobject we're binding the reference to.
666 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
667 switch (Adjustment.Kind) {
669 Object =
670 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
671 Adjustment.DerivedToBase.BasePath->path_begin(),
672 Adjustment.DerivedToBase.BasePath->path_end(),
673 /*NullCheckValue=*/ false, E->getExprLoc());
674 break;
675
678 LV = EmitLValueForField(LV, Adjustment.Field);
679 assert(LV.isSimple() &&
680 "materialized temporary field is not a simple lvalue");
681 Object = LV.getAddress();
682 break;
683 }
684
686 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
688 E, Object, Ptr, Adjustment.Ptr.MPT, /*IsInBounds=*/true);
689 break;
690 }
691 }
692 }
693
695}
696
697RValue
// EmitReferenceBindingToExpr - Emit the lvalue the reference binds to and
// return its address as an RValue.
// NOTE(review): the signature line, the sanitizer guard, and the type-check
// call are missing from this extraction.
699 // Emit the expression as an lvalue.
700 LValue LV = EmitLValue(E);
701 assert(LV.isSimple());
702 llvm::Value *Value = LV.getPointer(*this);
703
705 // C++11 [dcl.ref]p5 (as amended by core issue 453):
706 // If a glvalue to which a reference is directly bound designates neither
707 // an existing object or function of an appropriate type nor a region of
708 // storage of suitable size and alignment to contain an object of the
709 // reference's type, the behavior is undefined.
710 QualType Ty = E->getType();
712 }
713
714 return RValue::get(Value);
715}
716
717
718/// getAccessedFieldNo - Given an encoded value and a result number, return the
719/// input field number being accessed.
// NOTE(review): the signature's first line (with the Idx parameter) is
// missing from this extraction.
721 const llvm::Constant *Elts) {
722 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
723 ->getZExtValue();
724}
725
726static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
727 llvm::Value *Ptr) {
728 llvm::Value *A0 =
729 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
730 llvm::Value *A1 =
731 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
732 return Builder.CreateXor(Acc, A1);
733}
734
// NOTE(review): two function headers are missing from this extraction. The
// first body below appears to decide whether a vptr check applies (dynamic
// class + member access/call-style TypeCheckKind); the second appears to be
// CodeGenFunction::sanitizePerformTypeCheck(), true when any of the type-check
// sanitizers is enabled. Confirm against the upstream file before editing.
739
742 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
743 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
746}
747
749 return SanOpts.has(SanitizerKind::Null) ||
750 SanOpts.has(SanitizerKind::Alignment) ||
751 SanOpts.has(SanitizerKind::ObjectSize) ||
752 SanOpts.has(SanitizerKind::Vptr);
753}
754
// EmitTypeCheck - Emit the UBSan pointer checks (null, object-size,
// alignment, and vptr/dynamic-type) for a pointer about to be used as Ty.
// NOTE(review): the function's first signature line and several other lines
// (an early-exit guard, the TypeDescriptor static-data entry, the vtable
// authentication argument, the load alignment) are missing from this
// extraction. The sanitizer control flow below is intricate; code is kept
// verbatim — confirm against the upstream file before editing.
756 llvm::Value *Ptr, QualType Ty,
757 CharUnits Alignment,
758 SanitizerSet SkippedChecks,
759 llvm::Value *ArraySize) {
761 return;
762
763 // Don't check pointers outside the default address space. The null check
764 // isn't correct, the object-size check isn't supported by LLVM, and we can't
765 // communicate the addresses to the runtime handler for the vptr check.
766 if (Ptr->getType()->getPointerAddressSpace())
767 return;
768
769 // Don't check pointers to volatile data. The behavior here is implementation-
770 // defined.
771 if (Ty.isVolatileQualified())
772 return;
773
774 // Quickly determine whether we have a pointer to an alloca. It's possible
775 // to skip null checks, and some alignment checks, for these pointers. This
776 // can reduce compile-time significantly.
777 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
778
779 llvm::Value *IsNonNull = nullptr;
780 bool IsGuaranteedNonNull =
781 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
782
783 llvm::BasicBlock *Done = nullptr;
784 bool DoneViaNullSanitize = false;
785
786 {
787 auto CheckHandler = SanitizerHandler::TypeMismatch;
788 SanitizerDebugLocation SanScope(this,
789 {SanitizerKind::SO_Null,
790 SanitizerKind::SO_ObjectSize,
791 SanitizerKind::SO_Alignment},
792 CheckHandler);
793
795 Checks;
796
797 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
798 bool AllowNullPointers = isNullPointerAllowed(TCK);
799 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
800 !IsGuaranteedNonNull) {
801 // The glvalue must not be an empty glvalue.
802 IsNonNull = Builder.CreateIsNotNull(Ptr);
803
804 // The IR builder can constant-fold the null check if the pointer points
805 // to a constant.
806 IsGuaranteedNonNull = IsNonNull == True;
807
808 // Skip the null check if the pointer is known to be non-null.
809 if (!IsGuaranteedNonNull) {
810 if (AllowNullPointers) {
811 // When performing pointer casts, it's OK if the value is null.
812 // Skip the remaining checks in that case.
813 Done = createBasicBlock("null");
814 DoneViaNullSanitize = true;
815 llvm::BasicBlock *Rest = createBasicBlock("not.null");
816 Builder.CreateCondBr(IsNonNull, Rest, Done);
817 EmitBlock(Rest);
818 } else {
819 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
820 }
821 }
822 }
823
824 if (SanOpts.has(SanitizerKind::ObjectSize) &&
825 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
826 !Ty->isIncompleteType()) {
827 uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
828 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
829 if (ArraySize)
830 Size = Builder.CreateMul(Size, ArraySize);
831
832 // Degenerate case: new X[0] does not need an objectsize check.
833 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
834 if (!ConstantSize || !ConstantSize->isNullValue()) {
835 // The glvalue must refer to a large enough storage region.
836 // FIXME: If Address Sanitizer is enabled, insert dynamic
837 // instrumentation
838 // to check this.
839 // FIXME: Get object address space
840 llvm::Type *Tys[2] = {IntPtrTy, Int8PtrTy};
841 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
842 llvm::Value *Min = Builder.getFalse();
843 llvm::Value *NullIsUnknown = Builder.getFalse();
844 llvm::Value *Dynamic = Builder.getFalse();
845 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
846 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
847 Checks.push_back(
848 std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
849 }
850 }
851
852 llvm::MaybeAlign AlignVal;
853 llvm::Value *PtrAsInt = nullptr;
854
855 if (SanOpts.has(SanitizerKind::Alignment) &&
856 !SkippedChecks.has(SanitizerKind::Alignment)) {
857 AlignVal = Alignment.getAsMaybeAlign();
858 if (!Ty->isIncompleteType() && !AlignVal)
859 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
860 /*ForPointeeType=*/true)
861 .getAsMaybeAlign();
862
863 // The glvalue must be suitably aligned.
864 if (AlignVal && *AlignVal > llvm::Align(1) &&
865 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
866 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
867 llvm::Value *Align = Builder.CreateAnd(
868 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
869 llvm::Value *Aligned =
870 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
871 if (Aligned != True)
872 Checks.push_back(
873 std::make_pair(Aligned, SanitizerKind::SO_Alignment));
874 }
875 }
876
877 if (Checks.size() > 0) {
878 llvm::Constant *StaticData[] = {
880 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
881 llvm::ConstantInt::get(Int8Ty, TCK)};
882 EmitCheck(Checks, CheckHandler, StaticData, PtrAsInt ? PtrAsInt : Ptr);
883 }
884 }
885
886 // If possible, check that the vptr indicates that there is a subobject of
887 // type Ty at offset zero within this object.
888 //
889 // C++11 [basic.life]p5,6:
890 // [For storage which does not refer to an object within its lifetime]
891 // The program has undefined behavior if:
892 // -- the [pointer or glvalue] is used to access a non-static data member
893 // or call a non-static member function
894 if (SanOpts.has(SanitizerKind::Vptr) &&
895 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
896 SanitizerDebugLocation SanScope(this, {SanitizerKind::SO_Vptr},
897 SanitizerHandler::DynamicTypeCacheMiss);
898
899 // Ensure that the pointer is non-null before loading it. If there is no
900 // compile-time guarantee, reuse the run-time null check or emit a new one.
901 if (!IsGuaranteedNonNull) {
902 if (!IsNonNull)
903 IsNonNull = Builder.CreateIsNotNull(Ptr);
904 if (!Done)
905 Done = createBasicBlock("vptr.null");
906 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
907 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
908 EmitBlock(VptrNotNull);
909 }
910
911 // Compute a deterministic hash of the mangled name of the type.
912 SmallString<64> MangledName;
913 llvm::raw_svector_ostream Out(MangledName);
914 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
915 Out);
916
917 // Contained in NoSanitizeList based on the mangled type.
918 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
919 Out.str())) {
920 // Load the vptr, and mix it with TypeHash.
921 llvm::Value *TypeHash =
922 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
923
924 llvm::Type *VPtrTy = llvm::PointerType::get(getLLVMContext(), 0);
925 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
926 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
927 Ty->getAsCXXRecordDecl(),
929 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
930
931 llvm::Value *Hash =
932 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
933 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
934
935 // Look the hash up in our cache.
936 const int CacheSize = 128;
937 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
938 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
939 "__ubsan_vptr_type_cache");
940 llvm::Value *Slot = Builder.CreateAnd(Hash,
941 llvm::ConstantInt::get(IntPtrTy,
942 CacheSize-1));
943 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
944 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
945 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
947
948 // If the hash isn't in the cache, call a runtime handler to perform the
949 // hard work of checking whether the vptr is for an object of the right
950 // type. This will either fill in the cache and return, or produce a
951 // diagnostic.
952 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
953 llvm::Constant *StaticData[] = {
956 CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
957 llvm::ConstantInt::get(Int8Ty, TCK)
958 };
959 llvm::Value *DynamicData[] = { Ptr, Hash };
960 EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
961 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
962 DynamicData);
963 }
964 }
965
966 if (Done) {
967 SanitizerDebugLocation SanScope(
968 this,
969 {DoneViaNullSanitize ? SanitizerKind::SO_Null : SanitizerKind::SO_Vptr},
970 DoneViaNullSanitize ? SanitizerHandler::TypeMismatch
971 : SanitizerHandler::DynamicTypeCacheMiss);
972 Builder.CreateBr(Done);
973 EmitBlock(Done);
974 }
975}
976
// LoadPassedObjectSize - If the array expression refers to a parameter
// carrying pass_object_size, load the implicitly-passed byte size and divide
// by the element size to recover an element count; returns nullptr when no
// usable size is available.
// NOTE(review): the signature line and the line declaring the ASTContext
// reference `C` are missing from this extraction.
978 QualType EltTy) {
980 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
981 if (!EltSize)
982 return nullptr;
983
984 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
985 if (!ArrayDeclRef)
986 return nullptr;
987
988 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
989 if (!ParamDecl)
990 return nullptr;
991
992 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
993 if (!POSAttr)
994 return nullptr;
995
996 // Don't load the size if it's a lower bound.
997 int POSType = POSAttr->getType();
998 if (POSType != 0 && POSType != 1)
999 return nullptr;
1000
1001 // Find the implicit size parameter.
1002 auto PassedSizeIt = SizeArguments.find(ParamDecl);
1003 if (PassedSizeIt == SizeArguments.end())
1004 return nullptr;
1005
1006 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
1007 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
1008 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
1009 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
1010 C.getSizeType(), E->getExprLoc());
1011 llvm::Value *SizeOfElement =
1012 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
1013 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
1014}
1015
1016/// If Base is known to point to the start of an array, return the length of
1017/// that array. Return 0 if the length cannot be determined.
1019 const Expr *Base,
1020 QualType &IndexedType,
1022 StrictFlexArraysLevel) {
1023 // For the vector indexing extension, the bound is the number of elements.
1024 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
1025 IndexedType = Base->getType();
1026 return CGF.Builder.getInt32(VT->getNumElements());
1027 }
1028
1029 Base = Base->IgnoreParens();
1030
1031 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1032 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
1033 !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
1034 StrictFlexArraysLevel)) {
1035 CodeGenFunction::SanitizerScope SanScope(&CGF);
1036
1037 IndexedType = CE->getSubExpr()->getType();
1038 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
1039 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
1040 return CGF.Builder.getInt(CAT->getSize());
1041
1042 if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
1043 return CGF.getVLASize(VAT).NumElts;
1044 // Ignore pass_object_size here. It's not applicable on decayed pointers.
1045 }
1046 }
1047
1048 CodeGenFunction::SanitizerScope SanScope(&CGF);
1049
1050 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
1051 if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
1052 IndexedType = Base->getType();
1053 return POS;
1054 }
1055
1056 return nullptr;
1057}
1058
1059namespace {
1060
1061/// \p StructAccessBase returns the base \p Expr of a field access. It returns
1062/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
1063///
1064/// p in p-> a.b.c
1065///
1066/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
1067/// looking for:
1068///
1069/// struct s {
1070/// struct s *ptr;
1071/// int count;
1072/// char array[] __attribute__((counted_by(count)));
1073/// };
1074///
1075/// If we have an expression like \p p->ptr->array[index], we want the
1076/// \p MemberExpr for \p p->ptr instead of \p p.
1077class StructAccessBase
1078 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
1079 const RecordDecl *ExpectedRD;
1080
1081 bool IsExpectedRecordDecl(const Expr *E) const {
1082 QualType Ty = E->getType();
1083 if (Ty->isPointerType())
1084 Ty = Ty->getPointeeType();
1085 return ExpectedRD == Ty->getAsRecordDecl();
1086 }
1087
1088public:
1089 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
1090
1091 //===--------------------------------------------------------------------===//
1092 // Visitor Methods
1093 //===--------------------------------------------------------------------===//
1094
1095 // NOTE: If we build C++ support for counted_by, then we'll have to handle
1096 // horrors like this:
1097 //
1098 // struct S {
1099 // int x, y;
1100 // int blah[] __attribute__((counted_by(x)));
1101 // } s;
1102 //
1103 // int foo(int index, int val) {
1104 // int (S::*IHatePMDs)[] = &S::blah;
1105 // (s.*IHatePMDs)[index] = val;
1106 // }
1107
1108 const Expr *Visit(const Expr *E) {
1109 return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
1110 }
1111
1112 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1113
1114 // These are the types we expect to return (in order of most to least
1115 // likely):
1116 //
1117 // 1. DeclRefExpr - This is the expression for the base of the structure.
1118 // It's exactly what we want to build an access to the \p counted_by
1119 // field.
1120 // 2. MemberExpr - This is the expression that has the same \p RecordDecl
1121 // as the flexble array member's lexical enclosing \p RecordDecl. This
1122 // allows us to catch things like: "p->p->array"
1123 // 3. CompoundLiteralExpr - This is for people who create something
1124 // heretical like (struct foo has a flexible array member):
1125 //
1126 // (struct foo){ 1, 2 }.blah[idx];
1127 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1128 return IsExpectedRecordDecl(E) ? E : nullptr;
1129 }
1130 const Expr *VisitMemberExpr(const MemberExpr *E) {
1131 if (IsExpectedRecordDecl(E) && E->isArrow())
1132 return E;
1133 const Expr *Res = Visit(E->getBase());
1134 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1135 }
1136 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1137 return IsExpectedRecordDecl(E) ? E : nullptr;
1138 }
1139 const Expr *VisitCallExpr(const CallExpr *E) {
1140 return IsExpectedRecordDecl(E) ? E : nullptr;
1141 }
1142
1143 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1144 if (IsExpectedRecordDecl(E))
1145 return E;
1146 return Visit(E->getBase());
1147 }
1148 const Expr *VisitCastExpr(const CastExpr *E) {
1149 if (E->getCastKind() == CK_LValueToRValue)
1150 return IsExpectedRecordDecl(E) ? E : nullptr;
1151 return Visit(E->getSubExpr());
1152 }
1153 const Expr *VisitParenExpr(const ParenExpr *E) {
1154 return Visit(E->getSubExpr());
1155 }
1156 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1157 return Visit(E->getSubExpr());
1158 }
1159 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1160 return Visit(E->getSubExpr());
1161 }
1162};
1163
1164} // end anonymous namespace
1165
1167
1169 const FieldDecl *Field,
1170 RecIndicesTy &Indices) {
1171 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1172 int64_t FieldNo = -1;
1173 for (const FieldDecl *FD : RD->fields()) {
1174 if (!Layout.containsFieldDecl(FD))
1175 // This could happen if the field has a struct type that's empty. I don't
1176 // know why either.
1177 continue;
1178
1179 FieldNo = Layout.getLLVMFieldNo(FD);
1180 if (FD == Field) {
1181 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1182 return true;
1183 }
1184
1185 QualType Ty = FD->getType();
1186 if (Ty->isRecordType()) {
1187 if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
1188 if (RD->isUnion())
1189 FieldNo = 0;
1190 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1191 return true;
1192 }
1193 }
1194 }
1195
1196 return false;
1197}
1198
1200 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1201 // Find the record containing the count field. Walk up through anonymous
1202 // structs/unions (which are transparent in C) but stop at named records.
1203 // Using getOuterLexicalRecordContext() here would be wrong because it walks
1204 // past named nested structs to the outermost record, causing a crash when a
1205 // struct with a counted_by FAM is defined nested inside another struct.
1206 const RecordDecl *RD = CountDecl->getParent();
1207 while (RD->isAnonymousStructOrUnion()) {
1208 const auto *Parent = dyn_cast<RecordDecl>(RD->getLexicalParent());
1209 if (!Parent)
1210 break;
1211 RD = Parent;
1212 }
1213
1214 // Find the base struct expr (i.e. p in p->a.b.c.d).
1215 const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1216 if (!StructBase || StructBase->HasSideEffects(getContext()))
1217 return nullptr;
1218
1219 llvm::Value *Res = nullptr;
1220 if (StructBase->getType()->isPointerType()) {
1221 LValueBaseInfo BaseInfo;
1222 TBAAAccessInfo TBAAInfo;
1223 Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1224 Res = Addr.emitRawPointer(*this);
1225 } else if (StructBase->isLValue()) {
1226 LValue LV = EmitLValue(StructBase);
1227 Address Addr = LV.getAddress();
1228 Res = Addr.emitRawPointer(*this);
1229 } else {
1230 return nullptr;
1231 }
1232
1233 RecIndicesTy Indices;
1234 getGEPIndicesToField(*this, RD, CountDecl, Indices);
1235 if (Indices.empty())
1236 return nullptr;
1237
1238 Indices.push_back(Builder.getInt32(0));
1239 CanQualType T = CGM.getContext().getCanonicalTagType(RD);
1240 return Builder.CreateInBoundsGEP(ConvertType(T), Res,
1241 RecIndicesTy(llvm::reverse(Indices)),
1242 "counted_by.gep");
1243}
1244
1245/// This method is typically called in contexts where we can't generate
1246/// side-effects, like in __builtin_dynamic_object_size. When finding
1247/// expressions, only choose those that have either already been emitted or can
1248/// be loaded without side-effects.
1249///
1250/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1251/// within the top-level struct.
1252/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1254 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1255 if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
1256 return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
1257 getIntAlign(), "counted_by.load");
1258 return nullptr;
1259}
1260
1262 const Expr *ArrayExprBase,
1263 llvm::Value *IndexVal, QualType IndexType,
1264 bool Accessed) {
1265 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1266 "should not be called unless adding bounds checks");
1267 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1268 getLangOpts().getStrictFlexArraysLevel();
1269 QualType ArrayExprBaseType;
1270 llvm::Value *BoundsVal = getArrayIndexingBound(
1271 *this, ArrayExprBase, ArrayExprBaseType, StrictFlexArraysLevel);
1272
1273 EmitBoundsCheckImpl(ArrayExpr, ArrayExprBaseType, IndexVal, IndexType,
1274 BoundsVal, getContext().getSizeType(), Accessed);
1275}
1276
1278 QualType ArrayBaseType,
1279 llvm::Value *IndexVal,
1280 QualType IndexType,
1281 llvm::Value *BoundsVal,
1282 QualType BoundsType, bool Accessed) {
1283 if (!BoundsVal)
1284 return;
1285
1286 auto CheckKind = SanitizerKind::SO_ArrayBounds;
1287 auto CheckHandler = SanitizerHandler::OutOfBounds;
1288 SanitizerDebugLocation SanScope(this, {CheckKind}, CheckHandler);
1289
1290 // All hail the C implicit type conversion rules!!!
1291 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1292 bool BoundsSigned = BoundsType->isSignedIntegerOrEnumerationType();
1293
1294 const ASTContext &Ctx = getContext();
1295 llvm::Type *Ty = ConvertType(
1296 Ctx.getTypeSize(IndexType) >= Ctx.getTypeSize(BoundsType) ? IndexType
1297 : BoundsType);
1298
1299 llvm::Value *IndexInst = Builder.CreateIntCast(IndexVal, Ty, IndexSigned);
1300 llvm::Value *BoundsInst = Builder.CreateIntCast(BoundsVal, Ty, false);
1301
1302 llvm::Constant *StaticData[] = {
1303 EmitCheckSourceLocation(ArrayExpr->getExprLoc()),
1304 EmitCheckTypeDescriptor(ArrayBaseType),
1305 EmitCheckTypeDescriptor(IndexType),
1306 };
1307
1308 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexInst, BoundsInst)
1309 : Builder.CreateICmpULE(IndexInst, BoundsInst);
1310
1311 if (BoundsSigned) {
1312 // Don't allow a negative bounds.
1313 llvm::Value *Cmp = Builder.CreateICmpSGT(
1314 BoundsVal, llvm::ConstantInt::get(BoundsVal->getType(), 0));
1315 Check = Builder.CreateAnd(Cmp, Check);
1316 }
1317
1318 EmitCheck(std::make_pair(Check, CheckKind), CheckHandler, StaticData,
1319 IndexInst);
1320}
1321
1323 auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, getContext());
1324 if (!ATMD)
1325 return nullptr;
1326
1327 llvm::MDBuilder MDB(getLLVMContext());
1328 auto *TypeNameMD = MDB.createString(ATMD->TypeName);
1329 auto *ContainsPtrC = Builder.getInt1(ATMD->ContainsPointer);
1330 auto *ContainsPtrMD = MDB.createConstant(ContainsPtrC);
1331
1332 // Format: !{<type-name>, <contains-pointer>}
1333 return llvm::MDNode::get(CGM.getLLVMContext(), {TypeNameMD, ContainsPtrMD});
1334}
1335
1336void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, QualType AllocType) {
1337 assert(SanOpts.has(SanitizerKind::AllocToken) &&
1338 "Only needed with -fsanitize=alloc-token");
1339 CB->setMetadata(llvm::LLVMContext::MD_alloc_token,
1340 buildAllocToken(AllocType));
1341}
1342
// NOTE(review): extraction gap — the signature of this overload and the line
// computing `AllocType` are missing from this listing. Judging by the caller
// below (EmitAllocToken(llvm::CallBase *, const CallExpr *)), this is
// presumably buildAllocToken(const CallExpr *E), which infers the allocated
// type from the call and delegates to the QualType overload; returns null
// when no type could be inferred. TODO: confirm against the canonical source.
1345 if (!AllocType.isNull())
1346 return buildAllocToken(AllocType);
1347 return nullptr;
1348 }
1349
1350void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, const CallExpr *E) {
1351 assert(SanOpts.has(SanitizerKind::AllocToken) &&
1352 "Only needed with -fsanitize=alloc-token");
1353 if (llvm::MDNode *MDN = buildAllocToken(E))
1354 CB->setMetadata(llvm::LLVMContext::MD_alloc_token, MDN);
1355}
1356
1359 bool isInc, bool isPre) {
1360 ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1361
1362 llvm::Value *NextVal;
1363 if (isa<llvm::IntegerType>(InVal.first->getType())) {
1364 uint64_t AmountVal = isInc ? 1 : -1;
1365 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1366
1367 // Add the inc/dec to the real part.
1368 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1369 } else {
1370 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1371 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1372 if (!isInc)
1373 FVal.changeSign();
1374 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1375
1376 // Add the inc/dec to the real part.
1377 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1378 }
1379
1380 ComplexPairTy IncVal(NextVal, InVal.second);
1381
1382 // Store the updated result through the lvalue.
1383 EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1384 if (getLangOpts().OpenMP)
1385 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1386 E->getSubExpr());
1387
1388 // If this is a postinc, return the value read from memory, otherwise use the
1389 // updated value.
1390 return isPre ? IncVal : InVal;
1391}
1392
1394 CodeGenFunction *CGF) {
1395 // Bind VLAs in the cast type.
1396 if (CGF && E->getType()->isVariablyModifiedType())
1398
1399 if (CGDebugInfo *DI = getModuleDebugInfo())
1400 DI->EmitExplicitCastType(E->getType());
1401}
1402
1403//===----------------------------------------------------------------------===//
1404// LValue Expression Emission
1405//===----------------------------------------------------------------------===//
1406
1407static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx,
1408 CharUnits eltSize) {
1409 // If we have a constant index, we can use the exact offset of the
1410 // element we're accessing.
1411 if (auto *constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
1412 CharUnits offset = constantIdx->getZExtValue() * eltSize;
1413 return arrayAlign.alignmentAtOffset(offset);
1414 }
1415
1416 // Otherwise, use the worst-case alignment for any element.
1417 return arrayAlign.alignmentOfArrayElement(eltSize);
1418}
1419
1420 /// Emit pointer + index arithmetic.
// NOTE(review): extraction gaps — the following original lines are missing
// from this listing: the function's declaration line (a file-static helper
// taking CodeGenFunction & plus the parameters below), the trailing argument
// line of each EmitPointerWithAlignment call, the callee line of the
// alignment computation (presumably getArrayElementAlign, defined above),
// and one argument line of the final Address constructor. TODO: restore from
// the canonical source before compiling.
1422 const BinaryOperator *BO,
1423 LValueBaseInfo *BaseInfo,
1424 TBAAAccessInfo *TBAAInfo,
1425 KnownNonNull_t IsKnownNonNull) {
1426 assert(BO->isAdditiveOp() && "Expect an addition or subtraction.");
1427 Expr *pointerOperand = BO->getLHS();
1428 Expr *indexOperand = BO->getRHS();
1429 bool isSubtraction = BO->getOpcode() == BO_Sub;
1430
1431 Address BaseAddr = Address::invalid();
1432 llvm::Value *index = nullptr;
1433 // In a subtraction, the LHS is always the pointer.
1434 // Note: do not change the evaluation order.
// When adding with the index on the left (idx + ptr), swap the operands but
// still evaluate in source order: index first, then the pointer.
1435 if (!isSubtraction && !pointerOperand->getType()->isAnyPointerType()) {
1436 std::swap(pointerOperand, indexOperand);
1437 index = CGF.EmitScalarExpr(indexOperand);
1438 BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
1440 } else {
1441 BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
1443 index = CGF.EmitScalarExpr(indexOperand);
1444 }
1445
// Perform the arithmetic itself, then rebuild an Address with the
// alignment implied by the (possibly constant) index.
1446 llvm::Value *pointer = BaseAddr.getBasePointer();
1447 llvm::Value *Res = CGF.EmitPointerArithmetic(
1448 BO, pointerOperand, pointer, indexOperand, index, isSubtraction);
1449 QualType PointeeTy = BO->getType()->getPointeeType();
1450 CharUnits Align =
1452 CGF.getContext().getTypeSizeInChars(PointeeTy));
1453 return Address(Res, CGF.ConvertTypeForMem(PointeeTy), Align,
1455 /*Offset=*/nullptr, IsKnownNonNull);
1456 }
1457
// NOTE(review): file-static worker behind
// CodeGenFunction::EmitPointerWithAlignment (see the member function below,
// which calls ::EmitPointerWithAlignment). Extraction gaps — the declaration
// line (taking const Expr *E and LValueBaseInfo *BaseInfo before the
// parameters below), the second half of the assert, several call/assignment
// continuation lines, and the final return statement are missing from this
// listing. TODO: restore from the canonical source before compiling.
1459 TBAAAccessInfo *TBAAInfo,
1460 KnownNonNull_t IsKnownNonNull,
1461 CodeGenFunction &CGF) {
1462 // We allow this with ObjC object pointers because of fragile ABIs.
1463 assert(E->getType()->isPointerType() ||
1465 E = E->IgnoreParens();
1466
1467 // Casts:
1468 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1469 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1470 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1471
1472 switch (CE->getCastKind()) {
1473 // Non-converting casts (but not C's implicit conversion from void*).
1474 case CK_BitCast:
1475 case CK_NoOp:
1476 case CK_AddressSpaceConversion:
1477 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1478 if (PtrTy->getPointeeType()->isVoidType())
1479 break;
1480
// Recurse on the operand and propagate its base/TBAA info outward.
1481 LValueBaseInfo InnerBaseInfo;
1482 TBAAAccessInfo InnerTBAAInfo;
1484 CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1485 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1486 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1487
1488 if (isa<ExplicitCastExpr>(CE)) {
1489 LValueBaseInfo TargetTypeBaseInfo;
1490 TBAAAccessInfo TargetTypeTBAAInfo;
1492 E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1493 if (TBAAInfo)
1494 *TBAAInfo =
1495 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1496 // If the source l-value is opaque, honor the alignment of the
1497 // casted-to type.
1498 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1499 if (BaseInfo)
1500 BaseInfo->mergeForCast(TargetTypeBaseInfo);
1501 Addr.setAlignment(Align);
1502 }
1503 }
1504
// CFI: bitcasts between unrelated pointer types get a vptr check.
1505 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1506 CE->getCastKind() == CK_BitCast) {
1507 if (auto PT = E->getType()->getAs<PointerType>())
1508 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1509 /*MayBeNull=*/true,
1511 CE->getBeginLoc());
1512 }
1513
1514 llvm::Type *ElemTy =
1516 Addr = Addr.withElementType(ElemTy);
1517 if (CE->getCastKind() == CK_AddressSpaceConversion)
1519 Addr, CGF.ConvertType(E->getType()), ElemTy);
1520
1521 return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
1522 CE->getType());
1523 }
1524 break;
1525
1526 // Array-to-pointer decay.
1527 case CK_ArrayToPointerDecay:
1528 return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1529
1530 // Derived-to-base conversions.
1531 case CK_UncheckedDerivedToBase:
1532 case CK_DerivedToBase: {
1533 // TODO: Support accesses to members of base classes in TBAA. For now, we
1534 // conservatively pretend that the complete object is of the base class
1535 // type.
1536 if (TBAAInfo)
1537 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1539 CE->getSubExpr(), BaseInfo, nullptr,
1540 (KnownNonNull_t)(IsKnownNonNull ||
1541 CE->getCastKind() == CK_UncheckedDerivedToBase));
1542 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1543 return CGF.GetAddressOfBaseClass(
1544 Addr, Derived, CE->path_begin(), CE->path_end(),
1545 CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1546 }
1547
1548 // TODO: Is there any reason to treat base-to-derived conversions
1549 // specially?
1550 default:
1551 break;
1552 }
1553 }
1554
1555 // Unary &.
1556 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1557 if (UO->getOpcode() == UO_AddrOf) {
// &lvalue: the address of the lvalue IS the pointer value, with its
// alignment and TBAA info carried over.
1558 LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1559 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1560 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1561 return LV.getAddress();
1562 }
1563 }
1564
1565 // std::addressof and variants.
1566 if (auto *Call = dyn_cast<CallExpr>(E)) {
1567 switch (Call->getBuiltinCallee()) {
1568 default:
1569 break;
1570 case Builtin::BIaddressof:
1571 case Builtin::BI__addressof:
1572 case Builtin::BI__builtin_addressof: {
1573 LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1574 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1575 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1576 return LV.getAddress();
1577 }
1578 }
1579 }
1580
1581 // Pointer arithmetic: pointer +/- index.
1582 if (auto *BO = dyn_cast<BinaryOperator>(E)) {
1583 if (BO->isAdditiveOp())
1584 return emitPointerArithmetic(CGF, BO, BaseInfo, TBAAInfo, IsKnownNonNull);
1585 }
1586
1587 // TODO: conditional operators, comma.
1588
1589 // Otherwise, use the alignment of the type.
// NOTE(review): the fallback return statement (which evaluates E as a
// scalar and uses the natural alignment of the pointee type, per the
// /*ForPointeeType=*/true argument below) is missing from this listing.
1592 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1593 }
1594
1595/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1596/// derive a more accurate bound on the alignment of the pointer.
1598 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1599 KnownNonNull_t IsKnownNonNull) {
1600 Address Addr =
1601 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1602 if (IsKnownNonNull && !Addr.isKnownNonNull())
1603 Addr.setKnownNonNull();
1604 return Addr;
1605}
1606
1608 llvm::Value *V = RV.getScalarVal();
1609 if (auto MPT = T->getAs<MemberPointerType>())
1610 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1611 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1612}
1613
1615 if (Ty->isVoidType())
1616 return RValue::get(nullptr);
1617
1618 switch (getEvaluationKind(Ty)) {
1619 case TEK_Complex: {
1620 llvm::Type *EltTy =
1622 llvm::Value *U = llvm::UndefValue::get(EltTy);
1623 return RValue::getComplex(std::make_pair(U, U));
1624 }
1625
1626 // If this is a use of an undefined aggregate type, the aggregate must have an
1627 // identifiable address. Just because the contents of the value are undefined
1628 // doesn't mean that the address can't be taken and compared.
1629 case TEK_Aggregate: {
1630 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1631 return RValue::getAggregate(DestPtr);
1632 }
1633
1634 case TEK_Scalar:
1635 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1636 }
1637 llvm_unreachable("bad evaluation kind");
1638}
1639
1641 const char *Name) {
1642 ErrorUnsupported(E, Name);
1643 return GetUndefRValue(E->getType());
1644}
1645
1647 const char *Name) {
1648 ErrorUnsupported(E, Name);
1649 llvm::Type *ElTy = ConvertType(E->getType());
1650 llvm::Type *Ty = DefaultPtrTy;
1651 return MakeAddrLValue(
1652 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1653}
1654
1656 const Expr *Base = Obj;
1657 while (!isa<CXXThisExpr>(Base)) {
1658 // The result of a dynamic_cast can be null.
1660 return false;
1661
1662 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1663 Base = CE->getSubExpr();
1664 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1665 Base = PE->getSubExpr();
1666 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1667 if (UO->getOpcode() == UO_Extension)
1668 Base = UO->getSubExpr();
1669 else
1670 return false;
1671 } else {
1672 return false;
1673 }
1674 }
1675 return true;
1676}
1677
1679 LValue LV;
1680 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1681 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1682 else
1683 LV = EmitLValue(E);
1684 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1685 SanitizerSet SkippedChecks;
1686 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1687 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1688 if (IsBaseCXXThis)
1689 SkippedChecks.set(SanitizerKind::Alignment, true);
1690 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1691 SkippedChecks.set(SanitizerKind::Null, true);
1692 }
1693 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1694 }
1695 return LV;
1696}
1697
1698/// EmitLValue - Emit code to compute a designator that specifies the location
1699/// of the expression.
1700///
1701/// This can return one of two things: a simple address or a bitfield reference.
1702/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1703/// an LLVM pointer type.
1704///
1705/// If this returns a bitfield reference, nothing about the pointee type of the
1706/// LLVM value is known: For example, it may not be a pointer to an integer.
1707///
1708/// If this returns a normal address, and if the lvalue's C type is fixed size,
1709/// this method guarantees that the returned pointer type will point to an LLVM
1710/// type of the same size of the lvalue's type. If the lvalue has a variable
1711/// length type, this is not possible.
1712///
1714 KnownNonNull_t IsKnownNonNull) {
1715 // Running with sufficient stack space to avoid deeply nested expressions
1716 // cause a stack overflow.
1717 LValue LV;
1718 CGM.runWithSufficientStackSpace(
1719 E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });
1720
1721 if (IsKnownNonNull && !LV.isKnownNonNull())
1722 LV.setKnownNonNull();
1723 return LV;
1724}
1725
// Dispatch on the statement class of E and forward to the matching
// specialized lvalue-emission routine.
//
// NOTE(review): extraction gaps — many of the per-case `return Emit...`
// statements (the lines that carried hyperlinked callee names in the HTML
// listing) are missing below; a case label followed directly by another case
// label at a non-contiguous residual line number indicates a dropped return
// line, not fallthrough. TODO: restore from the canonical source before
// compiling.
1726 LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1727 KnownNonNull_t IsKnownNonNull) {
1728 ApplyDebugLocation DL(*this, E);
1729 switch (E->getStmtClass()) {
1730 default: return EmitUnsupportedLValue(E, "l-value expression");
1731
1732 case Expr::ObjCPropertyRefExprClass:
1733 llvm_unreachable("cannot emit a property reference directly");
1734
1735 case Expr::ObjCSelectorExprClass:
1737 case Expr::ObjCIsaExprClass:
1739 case Expr::BinaryOperatorClass:
1741 case Expr::CompoundAssignOperatorClass: {
// Compound assignment: atomic types are unwrapped first; complex types are
// handled by a dedicated path (its return line is missing below).
1742 QualType Ty = E->getType();
1743 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1744 Ty = AT->getValueType();
1745 if (!Ty->isAnyComplexType())
1748 }
1749 case Expr::CallExprClass:
1750 case Expr::CXXMemberCallExprClass:
1751 case Expr::CXXOperatorCallExprClass:
1752 case Expr::UserDefinedLiteralClass:
1754 case Expr::CXXRewrittenBinaryOperatorClass:
1755 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1756 IsKnownNonNull);
1757 case Expr::VAArgExprClass:
1759 case Expr::DeclRefExprClass:
1761 case Expr::ConstantExprClass: {
// Try to emit the constant directly; otherwise emit the subexpression as a
// regular lvalue (the intermediate use of Result is on a missing line).
1762 const ConstantExpr *CE = cast<ConstantExpr>(E);
1763 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE))
1765 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1766 }
1767 case Expr::ParenExprClass:
1768 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1769 case Expr::GenericSelectionExprClass:
1770 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1771 IsKnownNonNull);
1772 case Expr::PredefinedExprClass:
1774 case Expr::StringLiteralClass:
1776 case Expr::ObjCEncodeExprClass:
1778 case Expr::PseudoObjectExprClass:
1780 case Expr::InitListExprClass:
1782 case Expr::CXXTemporaryObjectExprClass:
1783 case Expr::CXXConstructExprClass:
1785 case Expr::CXXBindTemporaryExprClass:
1787 case Expr::CXXUuidofExprClass:
1789 case Expr::LambdaExprClass:
1790 return EmitAggExprToLValue(E);
1791
1792 case Expr::ExprWithCleanupsClass: {
1793 const auto *cleanups = cast<ExprWithCleanups>(E);
1794 RunCleanupsScope Scope(*this);
1795 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1796 if (LV.isSimple()) {
1797 // Defend against branches out of gnu statement expressions surrounded by
1798 // cleanups.
1799 Address Addr = LV.getAddress();
1800 llvm::Value *V = Addr.getBasePointer();
1801 Scope.ForceCleanup({&V});
1802 Addr.replaceBasePointer(V);
1803 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1804 LV.getBaseInfo(), LV.getTBAAInfo());
1805 }
1806 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1807 // bitfield lvalue or some other non-simple lvalue?
1808 return LV;
1809 }
1810
1811 case Expr::CXXDefaultArgExprClass: {
1812 auto *DAE = cast<CXXDefaultArgExpr>(E);
1813 CXXDefaultArgExprScope Scope(*this, DAE);
1814 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1815 }
1816 case Expr::CXXDefaultInitExprClass: {
1817 auto *DIE = cast<CXXDefaultInitExpr>(E);
1818 CXXDefaultInitExprScope Scope(*this, DIE);
1819 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1820 }
1821 case Expr::CXXTypeidExprClass:
1823
1824 case Expr::ObjCMessageExprClass:
1826 case Expr::ObjCIvarRefExprClass:
1828 case Expr::StmtExprClass:
1830 case Expr::UnaryOperatorClass:
1832 case Expr::ArraySubscriptExprClass:
1834 case Expr::MatrixSingleSubscriptExprClass:
1836 case Expr::MatrixSubscriptExprClass:
1838 case Expr::ArraySectionExprClass:
1840 case Expr::ExtVectorElementExprClass:
1842 case Expr::MatrixElementExprClass:
1844 case Expr::CXXThisExprClass:
1846 case Expr::MemberExprClass:
1848 case Expr::CompoundLiteralExprClass:
1850 case Expr::ConditionalOperatorClass:
1852 case Expr::BinaryConditionalOperatorClass:
1854 case Expr::ChooseExprClass:
1855 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1856 case Expr::OpaqueValueExprClass:
1858 case Expr::SubstNonTypeTemplateParmExprClass:
1859 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1860 IsKnownNonNull);
1861 case Expr::ImplicitCastExprClass:
1862 case Expr::CStyleCastExprClass:
1863 case Expr::CXXFunctionalCastExprClass:
1864 case Expr::CXXStaticCastExprClass:
1865 case Expr::CXXDynamicCastExprClass:
1866 case Expr::CXXReinterpretCastExprClass:
1867 case Expr::CXXConstCastExprClass:
1868 case Expr::CXXAddrspaceCastExprClass:
1869 case Expr::ObjCBridgedCastExprClass:
1870 return EmitCastLValue(cast<CastExpr>(E));
1871
1872 case Expr::MaterializeTemporaryExprClass:
1874
1875 case Expr::CoawaitExprClass:
1877 case Expr::CoyieldExprClass:
1879 case Expr::PackIndexingExprClass:
1880 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1881 case Expr::HLSLOutArgExprClass:
1882 llvm_unreachable("cannot emit a HLSL out argument directly");
1883 }
1884 }
1885
1886/// Given an object of the given canonical type, can we safely copy a
1887/// value out of it based on its initializer?
1889 assert(type.isCanonical());
1890 assert(!type->isReferenceType());
1891
1892 // Must be const-qualified but non-volatile.
1893 Qualifiers qs = type.getLocalQualifiers();
1894 if (!qs.hasConst() || qs.hasVolatile()) return false;
1895
1896 // Otherwise, all object types satisfy this except C++ classes with
1897 // mutable subobjects or non-trivial copy/destroy behavior.
1898 if (const auto *RT = dyn_cast<RecordType>(type))
1899 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
1900 RD = RD->getDefinitionOrSelf();
1901 if (RD->hasMutableFields() || !RD->isTrivial())
1902 return false;
1903 }
1904
1905 return true;
1906}
1907
1908/// Can we constant-emit a load of a reference to a variable of the
1909/// given type? This is different from predicates like
1910/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1911/// in situations that don't necessarily satisfy the language's rules
1912/// for this (e.g. C++'s ODR-use rules). For example, we want to able
1913/// to do this with const float variables even if those variables
1914/// aren't marked 'constexpr'.
1922 type = type.getCanonicalType();
1923 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1924 if (isConstantEmittableObjectType(ref->getPointeeType()))
1926 return CEK_AsReferenceOnly;
1927 }
1929 return CEK_AsValueOnly;
1930 return CEK_None;
1931}
1932
/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
  const ValueDecl *Value = RefExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  if (isa<ParmVarDecl>(Value)) {
    // Parameters are never constant-emittable.
    CEK = CEK_None;
  } else if (const auto *var = dyn_cast<VarDecl>(Value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(Value)) {
    // Enumerators always have a known constant value.
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      RefExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = RefExpr->getType().getUnqualifiedType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             RefExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = Value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as load from the host
  // global variable as compile time constant, since the host variable is not
  // accessible on device. The DRE of the captured reference variable has to be
  // loaded from captures.
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (isLambdaMethod(MD) && MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          // A variable without the device attribute cannot be referenced
          // from device code; fall back to loading from the capture.
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  llvm::Constant *C = ConstantEmitter(*this).emitAbstract(
      RefExpr->getLocation(), result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(Value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(Value)))
      EmitDeclRefExprDbgValue(RefExpr, result.Val);
  } else {
      EmitDeclRefExprDbgValue(RefExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)

}
2023
                                           const MemberExpr *ME) {
  // Only VarDecl members (static data members) can be rewritten to a plain
  // DeclRefExpr; ordinary fields need a base object and return null below.
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}
2035
    // If the member access was rewritten to a DeclRefExpr (guard elided in
    // this fragment), reuse the DRE-based constant-emission path.
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}
2042
  assert(Constant && "not a constant");
  // A reference-style constant denotes the referent's address: load through
  // the lvalue to get the scalar. Otherwise the constant is the value itself.
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}
2052
                                               SourceLocation Loc) {
  // Unpack the LValue and forward to the address-based overload.
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}
2059
2060// This method SHOULD NOT be extended to support additional types, like BitInt
2061// types, without an opt-in bool controlled by a CodeGenOptions setting (like
2062// -fstrict-bool) and a new UBSan check (like SanitizerKind::Bool) as breaking
2063// that assumption would lead to memory corruption. See link for examples of how
2064// having a bool that has a value different from 0 or 1 in memory can lead to
2065// memory corruption.
2066// https://discourse.llvm.org/t/defining-what-happens-when-a-bool-isn-t-0-or-1/86778
2067static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min,
2068 llvm::APInt &End, bool StrictEnums, bool StrictBool,
2069 bool IsBool) {
2070 const auto *ED = Ty->getAsEnumDecl();
2071 bool IsRegularCPlusPlusEnum =
2072 CGF.getLangOpts().CPlusPlus && StrictEnums && ED && !ED->isFixed();
2073 if (!IsBool && !IsRegularCPlusPlusEnum)
2074 return false;
2075
2076 if (IsBool) {
2077 if (!StrictBool)
2078 return false;
2079 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
2080 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
2081 } else {
2082 ED->getValueRange(End, Min);
2083 }
2084 return true;
2085}
2086
/// Build [Min, End) range metadata for loads of type \p Ty, or return null
/// when no valid-value range is known (per getRangeForType's strict-enum /
/// strict-bool rules).
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  // Vector bools are excluded: range metadata only applies to scalar loads.
  bool IsBool = Ty->hasBooleanRepresentation() && !Ty->isVectorType();
  bool StrictBoolEnabled = CGM.getCodeGenOpts().getLoadBoolFromMem() ==
  if (!getRangeForType(*this, Ty, Min, End,
                       /*StrictEnums=*/CGM.getCodeGenOpts().StrictEnums,
                       /*StrictBool=*/StrictBoolEnabled, /*IsBool=*/IsBool))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}
2100
                                               SourceLocation Loc) {
  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
    // In order to prevent the optimizer from throwing away the check, don't
    // attach range metadata to the load.
  } else if (CGM.getCodeGenOpts().isOptimizedBuild()) {
    // No sanitizer check was emitted: annotate the load with its known value
    // range (plus !noundef) so the optimizer can exploit it.
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
      Load->setMetadata(llvm::LLVMContext::MD_noundef,
                        llvm::MDNode::get(CGM.getLLVMContext(), {}));
    }
  }
}
2114
                                          SourceLocation Loc) {
  // Emit a UBSan check that a loaded bool/enum value lies in its valid
  // range. Returns true when this path takes responsibility for the load
  // (a check was emitted, or the range query failed after qualifying).
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
  if (!HasBoolCheck && !HasEnumCheck)
    return false;

  bool IsBool = (Ty->hasBooleanRepresentation() && !Ty->isVectorType()) ||
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
  bool NeedsBoolCheck = HasBoolCheck && IsBool;
  bool NeedsEnumCheck = HasEnumCheck && Ty->isEnumeralType();
  if (!NeedsBoolCheck && !NeedsEnumCheck)
    return false;

  // Single-bit booleans don't need to be checked. Special-case this to avoid
  // a bit width mismatch when handling bitfield values. This is handled by
  // EmitFromMemory for the non-bitfield case.
  if (IsBool &&
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
    return false;

  if (NeedsEnumCheck &&
      getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
    return false;

  llvm::APInt Min, End;
  // Strictness flags are forced on here: the sanitizer always assumes the
  // narrow valid range regardless of -fstrict-enums / strict-bool settings.
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true,
                       /*StrictBool=*/true, IsBool))
    return true;

      NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;

  auto &Ctx = getLLVMContext();
  auto CheckHandler = SanitizerHandler::LoadInvalidValue;
  SanitizerDebugLocation SanScope(this, {Kind}, CheckHandler);
  llvm::Value *Check;
  // Make End inclusive for the comparisons below.
  --End;
  if (!Min) {
    // Range starts at zero: a single unsigned comparison suffices.
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
  } else {
    // General case: signed Min <= Value <= End.
    llvm::Value *Upper =
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
    llvm::Value *Lower =
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
    Check = Builder.CreateAnd(Upper, Lower);
  }
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
  EmitCheck(std::make_pair(Check, Kind), CheckHandler, StaticArgs, Value);
  return true;
}
2167
                                               QualType Ty,
                                               SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               TBAAAccessInfo TBAAInfo,
                                               bool isNontemporal) {
  // Thread-local globals must be accessed through the TLS address wrapper.
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),

  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (ClangVecTy->isPackedVectorBoolType(getContext())) {
      llvm::Type *ValTy = ConvertType(Ty);
      unsigned ValNumElems =
          cast<llvm::FixedVectorType>(ValTy)->getNumElements();
      // Load the `iP` storage object (P is the padded vector size).
      auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
      const auto *RawIntTy = RawIntV->getType();
      assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
      // Bitcast iP --> <P x i1>.
      auto *PaddedVecTy = llvm::FixedVectorType::get(
          Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
      llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
      // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
      V = emitBoolVecConversion(V, ValNumElems, "extractvec");

      return EmitFromMemory(V, Ty);
    }

    // Handles vectors of sizes that are likely to be expanded to a larger size
    // to optimize performance.
    auto *VTy = cast<llvm::FixedVectorType>(Addr.getElementType());
    auto *NewVecTy =
        CGM.getABIInfo().getOptimalVectorMemoryType(VTy, getLangOpts());

    if (VTy != NewVecTy) {
      // Load as the wider memory type, then shuffle the leading lanes back
      // down to the original element count.
      Address Cast = Addr.withElementType(NewVecTy);
      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVecN");
      unsigned OldNumElements = VTy->getNumElements();
      SmallVector<int, 16> Mask(OldNumElements);
      std::iota(Mask.begin(), Mask.end(), 0);
      V = Builder.CreateShuffleVector(V, Mask, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
  }

  Addr =
      Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  maybeAttachRangeForLoad(Load, Ty, Loc);

  return EmitFromMemory(Load, Ty);
}
2239
/// Converts a scalar value from its primary IR type (as returned
/// by ConvertType) to its load/store type (as returned by
/// convertTypeForLoadStore).
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Atomic wrappers are transparent here; operate on the underlying type.
  if (auto *AtomicTy = Ty->getAs<AtomicType>())
    Ty = AtomicTy->getValueType();

  if (Ty->isExtVectorBoolType() || Ty->isConstantMatrixBoolType()) {
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());

    if (Value->getType() == StoreTy)
      return Value;

    // Wider per-element storage: zero-extend lane-wise.
    if (StoreTy->isVectorTy() && StoreTy->getScalarSizeInBits() >
                                     Value->getType()->getScalarSizeInBits())
      return Builder.CreateZExt(Value, StoreTy);

    // Expand to the memory bit width.
    unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
    // <N x i1> --> <P x i1>.
    Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
    // <P x i1> --> iP.
    Value = Builder.CreateBitCast(Value, StoreTy);
  }

  if (Ty->hasBooleanRepresentation() || Ty->isBitIntType()) {
    // Cast to the in-memory integer width (e.g. i1 -> i8 for bool).
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
    return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
  }

  return Value;
}
2273
/// Converts a scalar value from its load/store type (as returned
/// by convertTypeForLoadStore) to its primary IR type (as returned
/// by ConvertType).
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Atomic wrappers are transparent here; operate on the underlying type.
  if (auto *AtomicTy = Ty->getAs<AtomicType>())
    Ty = AtomicTy->getValueType();

    const auto *RawIntTy = Value->getType();

    // Bitcast iP --> <P x i1>.
    auto *PaddedVecTy = llvm::FixedVectorType::get(
        Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
    auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
    // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
    llvm::Type *ValTy = ConvertType(Ty);
    unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
    return emitBoolVecConversion(V, ValNumElems, "extractvec");
  }

  llvm::Type *ResTy = ConvertType(Ty);
  bool HasBoolRep = Ty->hasBooleanRepresentation() || Ty->isExtVectorBoolType();
  if (HasBoolRep && CGM.getCodeGenOpts().isConvertingBoolWithCmp0()) {
    // Normalize bools by comparing against zero rather than truncating, so
    // any nonzero in-memory value reads as true.
    return Builder.CreateICmpNE(
        Value, llvm::Constant::getNullValue(Value->getType()), "loadedv");
  }
  if (HasBoolRep || Ty->isBitIntType())
    return Builder.CreateTrunc(Value, ResTy, "loadedv");

  return Value;
}
2305
// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to a array (the memory type of MatrixType).
                                          CodeGenFunction &CGF,
                                          bool IsVector = true) {
  auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
  if (ArrayTy && IsVector) {
    // Array memory form, vector form requested: flatten to one wide vector.
    auto ArrayElements = ArrayTy->getNumElements();
    auto *ArrayElementTy = ArrayTy->getElementType();
    if (CGF.getContext().getLangOpts().HLSL) {
      // HLSL uses arrays of vectors; flatten the nested vector dimension too.
      auto *VectorTy = cast<llvm::FixedVectorType>(ArrayElementTy);
      ArrayElementTy = VectorTy->getElementType();
      ArrayElements *= VectorTy->getNumElements();
    }
    auto *VectorTy = llvm::FixedVectorType::get(ArrayElementTy, ArrayElements);

    return Addr.withElementType(VectorTy);
  }
  auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
  if (VectorTy && !IsVector) {
    // Vector memory form, array form requested: the inverse conversion.
    auto *ArrayTy = llvm::ArrayType::get(
        VectorTy->getElementType(),
        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());

    return Addr.withElementType(ArrayTy);
  }

  // Already in the requested form.
  return Addr;
}
2335
  // Lower a matrix component/swizzle access into an ext-vector-element
  // LValue over the matrix storage (function signature elided here).
  LValue Base;
  if (E->getBase()->isGLValue())
    Base = EmitLValue(E->getBase());
  else {
    // The base is a matrix rvalue: materialize it into a temporary so the
    // element access below has an address to index into.
    assert(E->getBase()->getType()->isConstantMatrixType() &&
           "Result must be a Constant Matrix");
    llvm::Value *Mat = EmitScalarExpr(E->getBase());
    Address MatMem = CreateMemTemp(E->getBase()->getType());
    QualType Ty = E->getBase()->getType();
    llvm::Type *LTy = convertTypeForLoadStore(Ty, Mat->getType());
    // Widen (e.g. bool matrices) to the in-memory element width first.
    if (LTy->getScalarSizeInBits() > Mat->getType()->getScalarSizeInBits())
      Mat = Builder.CreateZExt(Mat, LTy);
    Builder.CreateStore(Mat, MatMem);
  }
  QualType ResultType =
      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  // getEncodedElementAccess returns row-major linearized indices.
  E->getEncodedElementAccess(Indices);

  // getEncodedElementAccess returns row-major linearized indices
  // If the matrix memory layout is column-major, convert indices
  // to column-major indices.
  bool IsColMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
  if (IsColMajor) {
    const auto *MT = E->getBase()->getType()->castAs<ConstantMatrixType>();
    unsigned NumCols = MT->getNumColumns();
    for (uint32_t &Idx : Indices) {
      // Decompose row-major index: Row = Idx / NumCols, Col = Idx % NumCols
      unsigned Row = Idx / NumCols;
      unsigned Col = Idx % NumCols;
      // Re-linearize as column-major
      Idx = MT->getColumnMajorFlattenedIndex(Row, Col);
    }
  }

  if (Base.isSimple()) {
    RawAddress MatAddr = Base.getAddress();
    if (getLangOpts().HLSL &&
      MatAddr = CGM.getHLSLRuntime().createBufferMatrixTempAddress(
          Base, E->getExprLoc(), *this);

    llvm::Constant *CV =
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
        CV, ResultType, Base.getBaseInfo(),
        TBAAAccessInfo());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  // Swizzle of a swizzle: rebase the new indices on the existing ext-vector
  // element list.
  llvm::Constant *BaseElts = Base.getExtVectorElts();

  for (unsigned Index : Indices)
    CElts.push_back(BaseElts->getAggregateElement(Index));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);

      MaybeConvertMatrixAddress(Base.getExtVectorAddress(), *this), CV,
      ResultType, Base.getBaseInfo(), TBAAAccessInfo());
}
2403
2404// Emit a store of a matrix LValue. This may require casting the original
2405// pointer to memory address (ArrayType) to a pointer to the value type
2406// (VectorType).
2407static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2408 bool isInit, CodeGenFunction &CGF) {
2409 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2410 value->getType()->isVectorTy());
2411 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2412 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2413 lvalue.isNontemporal());
2414}
2415
                                        bool Volatile, QualType Ty,
                                        LValueBaseInfo BaseInfo,
                                        TBAAAccessInfo TBAAInfo,
                                        bool isInit, bool isNontemporal) {
  // Thread-local globals must be accessed through the TLS address wrapper.
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),

  // Handles vectors of sizes that are likely to be expanded to a larger size
  // to optimize performance.
  llvm::Type *SrcTy = Value->getType();
  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      auto *NewVecTy =
          CGM.getABIInfo().getOptimalVectorMemoryType(VecTy, getLangOpts());
      if (!ClangVecTy->isPackedVectorBoolType(getContext()) &&
          VecTy != NewVecTy) {
        // Widen the value with padding lanes to match the memory type.
        SmallVector<int, 16> Mask(NewVecTy->getNumElements(),
                                  VecTy->getNumElements());
        std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0);
        // Use undef instead of poison for the padding lanes, to make sure no
        // padding bits are poisoned, which may break coercion.
        Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
                                            Mask, "extractVec");
        SrcTy = NewVecTy;
      }
      if (Addr.getElementType() != SrcTy)
        Addr = Addr.withElementType(SrcTy);
    }
  }

  // Convert to the in-memory representation (bool/bit-int widening, etc.).
  Value = EmitToMemory(Value, Ty);

  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() ||
      (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);

  if (isNontemporal) {
    llvm::MDNode *Node =
        llvm::MDNode::get(Store->getContext(),
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
}
2471
2472void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2473 bool isInit) {
2474 if (lvalue.getType()->isConstantMatrixType()) {
2475 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2476 return;
2477 }
2478
2479 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2480 lvalue.getType(), lvalue.getBaseInfo(),
2481 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2482}
2483
// Emit a load of a LValue of matrix type. This may require casting the pointer
// to memory address (ArrayType) to a pointer to the value type (VectorType).
                                       CodeGenFunction &CGF) {
  assert(LV.getType()->isConstantMatrixType());
  RawAddress DestAddr = LV.getAddress();

  // HLSL constant buffers may pad matrix layouts, so copy elements into a
  // non-padded local alloca before loading.
  if (CGF.getLangOpts().HLSL &&
      LV.getType().getAddressSpace() == LangAS::hlsl_constant)
    DestAddr =

  // Reinterpret the array-typed storage as the matrix's vector value type,
  // then load it as an ordinary scalar.
  Address Addr = MaybeConvertMatrixAddress(DestAddr, CGF);
  LV.setAddress(Addr);
  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}
2502
                                        SourceLocation Loc) {
  // Dispatch on the evaluation kind of the lvalue's type.
  QualType Ty = LV.getType();
  switch (getEvaluationKind(Ty)) {
  case TEK_Scalar:
    return EmitLoadOfLValue(LV, Loc);
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
  case TEK_Aggregate:
    // Aggregates are copied into the provided slot rather than loaded.
    EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
    return Slot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}
2517
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
  // Load from __ptrauth.
  if (PointerAuthQualifier PtrAuth = LV.getQuals().getPointerAuth()) {
    // Recurse without the qualifier, then strip the pointer signature.
    llvm::Value *Value = EmitLoadOfLValue(LV, Loc).getScalarVal();
    return RValue::get(EmitPointerAuthUnqualify(PtrAuth, Value, LV.getType(),
                                                LV.getAddress(),
                                                /*known nonnull*/ false));
  }

  if (LV.isObjCWeak()) {
    // load of a __weak object.
    Address AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
    // In MRC mode, we do a load+autorelease.
    if (!getLangOpts().ObjCAutoRefCount) {
    }

    // In ARC mode, we load retained and then consume the value.
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    if (LV.getType()->isConstantMatrixType())
      return EmitLoadOfMatrixLValue(LV, Loc, *this);

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    // Load the whole vector, then extract the requested lane.
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
                                              LV.isVolatileQualified());
    llvm::Value *Elt =
        Builder.CreateExtractElement(Load, LV.getVectorIdx(), "vecext");
    return RValue::get(EmitFromMemory(Elt, LV.getType()));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt()) {
  }

  // Global Register variables always invoke intrinsics
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  if (LV.isMatrixElt()) {
    llvm::Value *Idx = LV.getMatrixIdx();
    QualType EltTy = LV.getType();
    if (const auto *MatTy = EltTy->getAs<ConstantMatrixType>()) {
      EltTy = MatTy->getElementType();
      if (CGM.getCodeGenOpts().isOptimizedBuild()) {
        // Tell the optimizer the index is within the flattened matrix.
        llvm::MatrixBuilder MB(Builder);
        MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
      }
    }
    llvm::LoadInst *Load =
        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
    llvm::Value *Elt = Builder.CreateExtractElement(Load, Idx, "matrixext");
    return RValue::get(EmitFromMemory(Elt, EltTy));
  }
  if (LV.isMatrixRow()) {
    QualType MatTy = LV.getType();
    const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();

    unsigned NumRows = MT->getNumRows();
    unsigned NumCols = MT->getNumColumns();
    unsigned NumLanes = NumCols;
    llvm::Value *MatrixVec = EmitLoadOfScalar(LV, Loc);
    llvm::Value *Row = LV.getMatrixRowIdx();
    llvm::Type *ElemTy = ConvertType(MT->getElementType());
    llvm::Constant *ColConstsIndices = nullptr;
    llvm::MatrixBuilder MB(Builder);

    if (LV.isMatrixRowSwizzle()) {
      // A row swizzle selects an explicit (possibly reordered) column set;
      // the lane count comes from the swizzle, not the matrix width.
      ColConstsIndices = LV.getMatrixRowElts();
      NumLanes = llvm::cast<llvm::FixedVectorType>(ColConstsIndices->getType())
                     ->getNumElements();
    }

    llvm::Type *RowTy = llvm::FixedVectorType::get(ElemTy, NumLanes);
    llvm::Value *Result = llvm::PoisonValue::get(RowTy); // <NumLanes x T>

    // Gather each requested column of the row into the result vector.
    for (unsigned Col = 0; Col < NumLanes; ++Col) {
      llvm::Value *ColIdx;
      if (ColConstsIndices)
        ColIdx = ColConstsIndices->getAggregateElement(Col);
      else
        ColIdx = llvm::ConstantInt::get(Row->getType(), Col);
      bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
      llvm::Value *EltIndex =
          MB.CreateIndex(Row, ColIdx, NumRows, NumCols, IsMatrixRowMajor);
      llvm::Value *Elt = Builder.CreateExtractElement(MatrixVec, EltIndex);
      llvm::Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
      Result = Builder.CreateInsertElement(Result, Elt, Lane);
    }

    return RValue::get(Result);
  }

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV, Loc);
}
2635
                                                 SourceLocation Loc) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  Address Ptr = LV.getBitFieldAddress();
  llvm::Value *Val =
      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");

  // AAPCS volatile bitfields use the declared container width/offset rather
  // than the packed storage layout.
  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
    // Shift the field to the top, then arithmetic-shift right so the result
    // is sign-extended.
    unsigned HighBits = StorageSize - Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Offset + HighBits)
      Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
  } else {
    // Unsigned: shift the field down and mask off the bits above it.
    if (Offset)
      Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
      Val = Builder.CreateAnd(
          Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
  EmitScalarRangeCheck(Val, LV.getType(), Loc);
  return RValue::get(Val);
}
2670
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
                                        LV.isVolatileQualified());

  // HLSL allows treating scalars as one-element vectors. Converting the scalar
  // IR value to a vector here allows the rest of codegen to behave as normal.
  if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
    llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
    llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
    Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
  }

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);

    llvm::Value *Element = Builder.CreateExtractElement(Vec, Elt);

    // The extracted lane may be wider than the result type (e.g. bool
    // storage); narrow by compare-against-zero or truncation as configured.
    llvm::Type *LVTy = ConvertType(LV.getType());
    if (Element->getType()->getPrimitiveSizeInBits() >
        LVTy->getPrimitiveSizeInBits()) {
      if (LV.getType()->hasBooleanRepresentation() &&
          CGM.getCodeGenOpts().isConvertingBoolWithCmp0())
        Element = Builder.CreateICmpNE(
            Element, llvm::Constant::getNullValue(Element->getType()));
      else
        Element = Builder.CreateTrunc(Element, LVTy);
    }

    return RValue::get(Element);
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(getAccessedFieldNo(i, Elts));

  Vec = Builder.CreateShuffleVector(Vec, Mask);

  if (LV.getType()->isExtVectorBoolType()) {
    // Convert from the bool storage representation to <N x i1>.
    if (CGM.getCodeGenOpts().isConvertingBoolWithCmp0())
      Vec = Builder.CreateICmpNE(Vec,
                                 llvm::Constant::getNullValue(Vec->getType()));
    else
      Vec = Builder.CreateTrunc(Vec, ConvertType(LV.getType()), "truncv");
  }

  return RValue::get(Vec);
}
2729
/// Generates lvalue for partial ext_vector access.
  Address VectorAddress = LV.getExtVectorAddress();
  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);

  // Reinterpret the vector pointer as a pointer to its element type so a
  // plain constant GEP can address the first accessed lane.
  Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  Address VectorBasePtrPlusIx =
      Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
                                     "vector.elt");

  return VectorBasePtrPlusIx;
}
2747
/// Load of global named registers are always calls to intrinsics.
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
  llvm::Type *Ty = OrigTy;
  // read_register operates on integers: read pointers as intptr and cast
  // the result back below.
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
  llvm::Value *Call = Builder.CreateCall(
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
  if (OrigTy->isPointerTy())
    Call = Builder.CreateIntToPtr(Call, OrigTy);
  return RValue::get(Call);
}
2769
2770/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2771/// lvalue, where both are guaranteed to have the same type, and that type
2772/// is 'Ty'.
/// Dispatches on the kind of destination l-value: non-simple destinations
/// (vector/matrix elements, ext-vector components, bit-fields, global
/// registers) each take a specialized store path; simple destinations fall
/// through to pointer-auth / ObjC-lifetime handling and EmitStoreOfScalar.
// NOTE(review): the embedded line numbers skip in several places (e.g. 2773
// with the rest of the signature, 2784, 2837, 2844, 2888, 2940, the ObjC
// lifetime case labels around 2968-2993, and 3015). The rendered listing
// this text was extracted from dropped those source lines; restore them
// from upstream clang/lib/CodeGen/CGExpr.cpp — the text is not compilable
// as-is.
2774 bool isInit) {
2775 if (!Dst.isSimple()) {
2776 if (Dst.isVectorElt()) {
2777 if (getLangOpts().HLSL) {
2778 // HLSL allows direct access to vector elements, so storing to
2779 // individual elements of a vector through VectorElt is handled as
2780 // separate store instructions.
2781 Address DstAddr = Dst.getVectorAddress();
2782 llvm::Type *DestAddrTy = DstAddr.getElementType();
2783 llvm::Type *ElemTy = DestAddrTy->getScalarType();
// NOTE(review): line 2784 (start of the ElemAlign initialization) was
// dropped by the extraction; 2785 below is its continuation.
2785 CGM.getDataLayout().getPrefTypeAlign(ElemTy));
2786
2787 assert(ElemTy->getScalarSizeInBits() >= 8 &&
2788 "vector element type must be at least byte-sized");
2789
// Widen sub-element-sized scalars (e.g. i1) up to the storage element
// width before storing.
2790 llvm::Value *Val = Src.getScalarVal();
2791 if (Val->getType()->getPrimitiveSizeInBits() <
2792 ElemTy->getScalarSizeInBits())
2793 Val = Builder.CreateZExt(Val, ElemTy->getScalarType());
2794
2795 llvm::Value *Idx = Dst.getVectorIdx();
2796 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2797 Address DstElemAddr =
2798 Builder.CreateGEP(DstAddr, {Zero, Idx}, DestAddrTy, ElemAlign);
2799 Builder.CreateStore(Val, DstElemAddr, Dst.isVolatileQualified());
2800 return;
2801 }
2802
2803 // Read/modify/write the vector, inserting the new element.
2804 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2805 Dst.isVolatileQualified());
2806 llvm::Type *VecTy = Vec->getType();
2807 llvm::Value *SrcVal = Src.getScalarVal();
2808
2809 if (VecTy->isVectorTy() && SrcVal->getType()->getPrimitiveSizeInBits() <
2810 VecTy->getScalarSizeInBits())
2811 SrcVal = Builder.CreateZExt(SrcVal, VecTy->getScalarType());
2812
// Bool vectors may be stored as an iN integer; temporarily view the
// loaded value as <N x i1> so insertelement works on it.
2813 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2814 if (IRStoreTy) {
2815 auto *IRVecTy = llvm::FixedVectorType::get(
2816 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2817 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2818 // iN --> <N x i1>.
2819 }
2820
2821 // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar
2822 // types which are mapped to vector LLVM IR types (e.g. for implementing
2823 // an ABI).
2824 if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(SrcVal->getType());
2825 EltTy && EltTy->getNumElements() == 1)
2826 SrcVal = Builder.CreateBitCast(SrcVal, EltTy->getElementType());
2827
2828 Vec = Builder.CreateInsertElement(Vec, SrcVal, Dst.getVectorIdx(),
2829 "vecins");
2830 if (IRStoreTy) {
2831 // <N x i1> --> <iN>.
2832 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2833 }
2834
2835 auto *I = Builder.CreateStore(Vec, Dst.getVectorAddress(),
2836 Dst.isVolatileQualified());
// NOTE(review): line 2837 was dropped here — presumably the
// addInstToCurrentSourceAtom(I, ...) debug-info call that consumes 'I';
// confirm against upstream.
2838 return;
2839 }
2840
2841 // If this is an update of extended vector elements, insert them as
2842 // appropriate.
2843 if (Dst.isExtVectorElt())
// NOTE(review): line 2844 was dropped — presumably
// "return EmitStoreThroughExtVectorComponentLValue(Src, Dst);".
2845
2846 if (Dst.isGlobalReg())
2847 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2848
2849 if (Dst.isMatrixElt()) {
2850 if (getLangOpts().HLSL) {
2851 // HLSL allows direct access to matrix elements, so storing to
2852 // individual elements of a matrix through MatrixElt is handled as
2853 // separate store instructions.
2854 Address DstAddr = Dst.getMatrixAddress();
2855 llvm::Type *DestAddrTy = DstAddr.getElementType();
2856 llvm::Type *ElemTy = DestAddrTy->getScalarType();
// NOTE(review): line 2857 (start of the ElemAlign initialization) was
// dropped; 2858 below is its continuation.
2858 CGM.getDataLayout().getPrefTypeAlign(ElemTy));
2859
2860 assert(ElemTy->getScalarSizeInBits() >= 8 &&
2861 "matrix element type must be at least byte-sized");
2862
2863 llvm::Value *Val = Src.getScalarVal();
2864 if (Val->getType()->getPrimitiveSizeInBits() <
2865 ElemTy->getScalarSizeInBits())
2866 Val = Builder.CreateZExt(Val, ElemTy->getScalarType());
2867
2868 llvm::Value *Idx = Dst.getMatrixIdx();
2869 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2870 Address DstElemAddr =
2871 Builder.CreateGEP(DstAddr, {Zero, Idx}, DestAddrTy, ElemAlign);
2872 Builder.CreateStore(Val, DstElemAddr, Dst.isVolatileQualified());
2873 return;
2874 }
2875
// Non-HLSL path: tell the optimizer the flattened index is in bounds,
// then read-modify-write the whole matrix value.
2876 llvm::Value *Idx = Dst.getMatrixIdx();
2877 if (CGM.getCodeGenOpts().isOptimizedBuild()) {
2878 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2879 llvm::MatrixBuilder MB(Builder);
2880 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2881 }
2882 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2883 llvm::Value *InsertVal = Src.getScalarVal();
2884 llvm::Value *Vec =
2885 Builder.CreateInsertElement(Load, InsertVal, Idx, "matins");
2886 auto *I = Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2887 Dst.isVolatileQualified());
// NOTE(review): line 2888 dropped — presumably the
// addInstToCurrentSourceAtom(I, ...) call; confirm against upstream.
2889 return;
2890 }
2891 if (Dst.isMatrixRow()) {
2892 // NOTE: Since there are no other languages that implement matrix single
2893 // subscripting, the logic here is specific to HLSL which allows
2894 // per-element stores to rows of matrices.
2895 assert(getLangOpts().HLSL &&
2896 "Store through matrix row LValues is only implemented for HLSL!");
2897 QualType MatTy = Dst.getType();
2898 const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();
2899
2900 unsigned NumRows = MT->getNumRows();
2901 unsigned NumCols = MT->getNumColumns();
2902 unsigned NumLanes = NumCols;
2903
2904 Address DstAddr = Dst.getMatrixAddress();
2905 llvm::Type *DestAddrTy = DstAddr.getElementType();
2906 llvm::Type *ElemTy = DestAddrTy->getScalarType();
2907 CharUnits ElemAlign =
2908 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(ElemTy));
2909
2910 assert(ElemTy->getScalarSizeInBits() >= 8 &&
2911 "matrix element type must be at least byte-sized");
2912
// Widen the incoming row vector to the storage element width if needed.
2913 llvm::Value *RowVal = Src.getScalarVal();
2914 if (RowVal->getType()->getScalarType()->getPrimitiveSizeInBits() <
2915 ElemTy->getScalarSizeInBits()) {
2916 auto *RowValVecTy = cast<llvm::FixedVectorType>(RowVal->getType());
2917 llvm::Type *StorageElmTy = llvm::FixedVectorType::get(
2918 ElemTy->getScalarType(), RowValVecTy->getNumElements());
2919 RowVal = Builder.CreateZExt(RowVal, StorageElmTy);
2920 }
2921
2922 llvm::MatrixBuilder MB(Builder);
2923
// A row swizzle (e.g. m[r].xz) stores only selected columns; the
// constant vector holds the column indices being written.
2924 llvm::Constant *ColConstsIndices = nullptr;
2925 if (Dst.isMatrixRowSwizzle()) {
2926 ColConstsIndices = Dst.getMatrixRowElts();
2927 NumLanes =
2928 llvm::cast<llvm::FixedVectorType>(ColConstsIndices->getType())
2929 ->getNumElements();
2930 }
2931
2932 llvm::Value *Row = Dst.getMatrixRowIdx();
2933 for (unsigned Col = 0; Col < NumLanes; ++Col) {
2934 llvm::Value *ColIdx;
2935 if (ColConstsIndices)
2936 ColIdx = ColConstsIndices->getAggregateElement(Col);
2937 else
2938 ColIdx = llvm::ConstantInt::get(Row->getType(), Col);
2939 bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
// NOTE(review): line 2940 (the layout enumerator compared against) was
// dropped by the extraction; confirm against upstream.
2941 llvm::Value *EltIndex =
2942 MB.CreateIndex(Row, ColIdx, NumRows, NumCols, IsMatrixRowMajor);
2943 llvm::Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
2944 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2945 llvm::Value *NewElt = Builder.CreateExtractElement(RowVal, Lane);
2946 Address DstElemAddr =
2947 Builder.CreateGEP(DstAddr, {Zero, EltIndex}, DestAddrTy, ElemAlign);
2948 Builder.CreateStore(NewElt, DstElemAddr, Dst.isVolatileQualified());
2949 }
2950
2951 return;
2952 }
2953
2954 assert(Dst.isBitField() && "Unknown LValue type");
2955 return EmitStoreThroughBitfieldLValue(Src, Dst);
2956 }
2957
2958 // Handle __ptrauth qualification by re-signing the value.
2959 if (PointerAuthQualifier PointerAuth = Dst.getQuals().getPointerAuth()) {
2960 Src = RValue::get(EmitPointerAuthQualify(PointerAuth, Src.getScalarVal(),
2961 Dst.getType(), Dst.getAddress(),
2962 /*known nonnull*/ false));
2963 }
2964
2965 // There's special magic for assigning into an ARC-qualified l-value.
// NOTE(review): the case labels of this switch (lines 2968, 2971, 2975,
// 2983, and the OCL_Autoreleasing case around 2992-2993) were dropped by
// the extraction; only the case bodies remain below.
2966 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2967 switch (Lifetime) {
2969 llvm_unreachable("present but none");
2970
2972 // nothing special
2973 break;
2974
2976 if (isInit) {
2977 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2978 break;
2979 }
2980 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2981 return;
2982
2984 if (isInit)
2985 // Initialize and then skip the primitive store.
2987 else
2989 /*ignore*/ true);
2990 return;
2991
2994 Src.getScalarVal()));
2995 // fall into the normal path
2996 break;
2997 }
2998 }
2999
// Assigning to a GC'd __weak object goes through the ObjC runtime's
// weak-assign entry point rather than a plain store.
3000 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
3001 // load of a __weak object.
3002 Address LvalueDst = Dst.getAddress();
3003 llvm::Value *src = Src.getScalarVal();
3004 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
3005 return;
3006 }
3007
// __strong GC assigns likewise dispatch to the runtime, with the ivar
// case passing the byte offset of the ivar within its base object.
3008 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
3009 // load of a __strong object.
3010 Address LvalueDst = Dst.getAddress();
3011 llvm::Value *src = Src.getScalarVal();
3012 if (Dst.isObjCIvar()) {
3013 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
3014 llvm::Type *ResultType = IntPtrTy;
// NOTE(review): line 3015 declaring 'dst' was dropped — presumably
// "Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());";
// confirm against upstream.
3016 llvm::Value *RHS = dst.emitRawPointer(*this);
3017 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
3018 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
3019 ResultType, "sub.ptr.lhs.cast");
3020 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
3021 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
3022 } else if (Dst.isGlobalObjCRef()) {
3023 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
3024 Dst.isThreadLocalRef());
3025 }
3026 else
3027 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
3028 return;
3029 }
3030
3031 assert(Src.isScalar() && "Can't emit an agg store with this method");
3032 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
3033}
3034
/// Stores Src into the bit-field described by Dst. When the field does not
/// fill its storage unit, the unit is loaded, the untouched bits are masked
/// back in, and the combined value is stored. If Result is non-null,
/// *Result receives the new value of the field (sign-extended as needed),
/// converted via EmitFromMemory.
// NOTE(review): line 3035 with the start of the signature was dropped by
// the extraction — upstream this is
// CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// llvm::Value **Result); confirm against CGExpr.cpp.
3036 llvm::Value **Result) {
3037 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
3038 llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
3039 Address Ptr = Dst.getBitFieldAddress();
3040
3041 // Get the source value, truncated to the width of the bit-field.
3042 llvm::Value *SrcVal = Src.getScalarVal();
3043
3044 // Cast the source to the storage type and shift it into place.
3045 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
3046 /*isSigned=*/false);
3047 llvm::Value *MaskedVal = SrcVal;
3048
// AAPCS volatile bit-fields may use a container-sized access instead of
// the default storage unit; pick the matching size/offset pair.
3049 const bool UseVolatile =
3050 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
3051 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
3052 const unsigned StorageSize =
3053 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
3054 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
3055 // See if there are other bits in the bitfield's storage we'll need to load
3056 // and mask together with source before storing.
3057 if (StorageSize != Info.Size) {
3058 assert(StorageSize > Info.Size && "Invalid bitfield size.");
3059 llvm::Value *Val =
3060 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
3061
3062 // Mask the source value as needed.
3063 if (!Dst.getType()->hasBooleanRepresentation())
3064 SrcVal = Builder.CreateAnd(
3065 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
3066 "bf.value");
3067 MaskedVal = SrcVal;
3068 if (Offset)
3069 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
3070
3071 // Mask out the original value.
3072 Val = Builder.CreateAnd(
3073 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
3074 "bf.clear");
3075
3076 // Or together the unchanged values and the source value.
3077 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
3078 } else {
3079 assert(Offset == 0);
3080 // According to the AAPCS:
3081 // When a volatile bit-field is written, and its container does not overlap
3082 // with any non-bit-field member, its container must be read exactly once
3083 // and written exactly once using the access width appropriate to the type
3084 // of the container. The two accesses are not atomic.
3085 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
3086 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
3087 Builder.CreateLoad(Ptr, true, "bf.load");
3088 }
3089
3090 // Write the new value back out.
3091 auto *I = Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
3092 addInstToCurrentSourceAtom(I, SrcVal);
3093
3094 // Return the new value of the bit-field, if requested.
3095 if (Result) {
// MaskedVal holds the field value before shifting into position.
3096 llvm::Value *ResultVal = MaskedVal;
3097
3098 // Sign extend the value if needed.
3099 if (Info.IsSigned) {
3100 assert(Info.Size <= StorageSize);
3101 unsigned HighBits = StorageSize - Info.Size;
3102 if (HighBits) {
3103 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
3104 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
3105 }
3106 }
3107
3108 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
3109 "bf.result.cast");
3110 *Result = EmitFromMemory(ResultVal, Dst.getType());
3111 }
3112}
3113
/// Stores through an ext-vector component l-value (e.g. v.xy = ...):
/// HLSL emits per-element stores, everything else does a load / shuffle or
/// insert / store of the whole vector.
// NOTE(review): the preceding signature line (3114) was dropped by the
// extraction — upstream this is
// CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// LValue Dst); confirm against CGExpr.cpp.
3115 LValue Dst) {
3116 llvm::Value *SrcVal = Src.getScalarVal();
3117 Address DstAddr = Dst.getExtVectorAddress();
3118 const llvm::Constant *Elts = Dst.getExtVectorElts();
// Widen the source to the in-memory element width if it is narrower.
3119 if (DstAddr.getElementType()->getScalarSizeInBits() >
3120 SrcVal->getType()->getScalarSizeInBits())
3121 SrcVal = Builder.CreateZExt(
3122 SrcVal, convertTypeForLoadStore(Dst.getType(), SrcVal->getType()));
3123
3124 if (getLangOpts().HLSL) {
3125 llvm::Type *DestAddrTy = DstAddr.getElementType();
3126 // HLSL allows storing to scalar values through ExtVector component LValues.
3127 // To support this we need to handle the case where the destination address
3128 // is a scalar.
3129 if (!DestAddrTy->isVectorTy()) {
3130 assert(!Dst.getType()->isVectorType() &&
3131 "this should only occur for non-vector l-values");
3132 Builder.CreateStore(SrcVal, DstAddr, Dst.isVolatileQualified());
3133 return;
3134 }
3135
3136 // HLSL allows direct access to vector elements, so storing to individual
3137 // elements of a vector through ExtVector is handled as separate store
3138 // instructions.
3139 // If we are updating multiple elements, Dst and Src are vectors; for
3140 // a single element update they are scalars.
3141 const VectorType *VTy = Dst.getType()->getAs<VectorType>();
3142 unsigned NumSrcElts = VTy ? VTy->getNumElements() : 1;
// NOTE(review): line 3143 (start of the ElemAlign initialization) was
// dropped by the extraction; 3144 below is its continuation.
3144 CGM.getDataLayout().getPrefTypeAlign(DestAddrTy->getScalarType()));
3145 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
3146
3147 for (unsigned I = 0; I != NumSrcElts; ++I) {
3148 llvm::Value *Val = VTy ? Builder.CreateExtractElement(
3149 SrcVal, llvm::ConstantInt::get(Int32Ty, I))
3150 : SrcVal;
3151 unsigned FieldNo = getAccessedFieldNo(I, Elts);
3152 Address DstElemAddr = Address::invalid();
3153 if (FieldNo == 0)
3154 DstElemAddr = DstAddr.withAlignment(ElemAlign);
3155 else
3156 DstElemAddr = Builder.CreateGEP(
3157 DstAddr, {Zero, llvm::ConstantInt::get(Int32Ty, FieldNo)},
3158 DestAddrTy, ElemAlign);
3159 Builder.CreateStore(Val, DstElemAddr, Dst.isVolatileQualified());
3160 }
3161 return;
3162 }
3163
3164 // This access turns into a read/modify/write of the vector. Load the input
3165 // value now.
3166 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
3167 llvm::Type *VecTy = Vec->getType();
3168
3169 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
3170 unsigned NumSrcElts = VTy->getNumElements();
3171 unsigned NumDstElts = cast<llvm::FixedVectorType>(VecTy)->getNumElements();
3172 if (NumDstElts == NumSrcElts) {
3173 // Use shuffle vector if the src and destination are the same number of
3174 // elements and restore the vector mask since it is on the side it will be
3175 // stored.
3176 SmallVector<int, 4> Mask(NumDstElts);
3177 for (unsigned i = 0; i != NumSrcElts; ++i)
3178 Mask[getAccessedFieldNo(i, Elts)] = i;
3179
3180 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
3181 } else if (NumDstElts > NumSrcElts) {
3182 // Extend the source vector to the same length and then shuffle it
3183 // into the destination.
3184 // FIXME: since we're shuffling with undef, can we just use the indices
3185 // into that? This could be simpler.
3186 SmallVector<int, 4> ExtMask;
3187 for (unsigned i = 0; i != NumSrcElts; ++i)
3188 ExtMask.push_back(i);
3189 ExtMask.resize(NumDstElts, -1);
3190 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
3191 // build identity
// NOTE(review): line 3192 (the declaration of Mask used below) was
// dropped by the extraction; confirm against upstream.
3193 for (unsigned i = 0; i != NumDstElts; ++i)
3194 Mask.push_back(i);
3195
3196 // When the vector size is odd and .odd or .hi is used, the last element
3197 // of the Elts constant array will be one past the size of the vector.
3198 // Ignore the last element here, if it is greater than the mask size.
3199 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
3200 NumSrcElts--;
3201
3202 // modify the mask entries for what gets shuffled in
3203 for (unsigned i = 0; i != NumSrcElts; ++i)
3204 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
3205 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
3206 } else {
3207 // We should never shorten the vector
3208 llvm_unreachable("unexpected shorten vector length");
3209 }
3210 } else {
3211 // If the Src is a scalar (not a vector), and the target is a vector it must
3212 // be updating one element.
3213 unsigned InIdx = getAccessedFieldNo(0, Elts);
3214 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
3215
3216 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
3217 }
3218
3219 Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
3220 Dst.isVolatileQualified());
3221}
3222
3223/// Store of global named registers are always calls to intrinsics.
/// Emits a call to @llvm.write_register with the register-name metadata
/// carried by the l-value; pointer values are first converted to the
/// target's intptr type since the intrinsic takes integers only.
// NOTE(review): line 3224 with the signature was dropped by the
// extraction — upstream this is
// CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
// confirm against CGExpr.cpp.
3225 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
3226 "Bad type for register variable");
3227 llvm::MDNode *RegName = cast<llvm::MDNode>(
3228 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
3229 assert(RegName && "Register LValue is not metadata");
3230
3231 // We accept integer and pointer types only
3232 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
3233 llvm::Type *Ty = OrigTy;
3234 if (OrigTy->isPointerTy())
3235 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
3236 llvm::Type *Types[] = { Ty };
3237
3238 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
3239 llvm::Value *Value = Src.getScalarVal();
3240 if (OrigTy->isPointerTy())
3241 Value = Builder.CreatePtrToInt(Value, Ty);
3242 Builder.CreateCall(
3243 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
3244}
3245
3246// setObjCGCLValueClass - sets class of the lvalue for the purpose of
3247// generating write-barriers API. It is currently a global, ivar,
3248// or neither.
// Recursively classifies the expression producing the l-value (walking
// through parens, casts, subscripts and member accesses) and sets the
// corresponding ObjC-GC flags (ivar / global / array) on LV. Only relevant
// when ObjC garbage collection is enabled.
3249static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
3250 LValue &LV,
3251 bool IsMemberAccess=false) {
3252 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
3253 return;
3254
3255 if (isa<ObjCIvarRefExpr>(E)) {
3256 QualType ExpTy = E->getType();
3257 if (IsMemberAccess && ExpTy->isPointerType()) {
3258 // If ivar is a structure pointer, assigning to field of
3259 // this struct follows gcc's behavior and makes it a non-ivar
3260 // write-barrier conservatively.
3261 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
3262 if (ExpTy->isRecordType()) {
3263 LV.setObjCIvar(false);
3264 return;
3265 }
3266 }
3267 LV.setObjCIvar(true);
3268 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
3269 LV.setBaseIvarExp(Exp->getBase());
3270 LV.setObjCArray(E->getType()->isArrayType());
3271 return;
3272 }
3273
3274 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
3275 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
3276 if (VD->hasGlobalStorage()) {
3277 LV.setGlobalObjCRef(true);
3278 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
3279 }
3280 }
3281 LV.setObjCArray(E->getType()->isArrayType());
3282 return;
3283 }
3284
3285 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
3286 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3287 return;
3288 }
3289
3290 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
3291 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3292 if (LV.isObjCIvar()) {
3293 // If cast is to a structure pointer, follow gcc's behavior and make it
3294 // a non-ivar write-barrier.
3295 QualType ExpTy = E->getType();
3296 if (ExpTy->isPointerType())
3297 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
3298 if (ExpTy->isRecordType())
3299 LV.setObjCIvar(false);
3300 }
3301 return;
3302 }
3303
3304 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
3305 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
3306 return;
3307 }
3308
3309 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
3310 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3311 return;
3312 }
3313
3314 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
3315 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3316 return;
3317 }
3318
3319 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
3320 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3321 return;
3322 }
3323
3324 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
3325 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
3326 if (LV.isObjCIvar() && !LV.isObjCArray())
3327 // Using array syntax to assign to what an ivar points to is not the
3328 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
3329 LV.setObjCIvar(false);
3330 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
3331 // Using array syntax to assign to what a global points to is not the
3332 // same as assigning to the global itself. {id *G;} G[i] = 0;
3333 LV.setGlobalObjCRef(false);
3334 return;
3335 }
3336
3337 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
3338 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
3339 // We don't know if member is an 'ivar', but this flag is looked at
3340 // only in the context of LV.isObjCIvar().
3341 LV.setObjCArray(E->getType()->isArrayType());
3342 return;
3343 }
3344}
3345
/// Returns an l-value for the OpenMP threadprivate copy of VD: the address
/// is rewritten through the OpenMP runtime (OMPIRBuilder helper or
/// CGOpenMPRuntime::getAddrOfThreadPrivate) and retyped to RealVarTy.
// NOTE(review): the extraction dropped lines 3346 (the start of the
// signature — upstream: static LValue EmitThreadPrivateVarDeclLValue(...)),
// 3350 (the callee of the OMPIRBuilder branch) and 3357 (the final
// "return CGF.MakeAddrLValue(...)"); confirm against CGExpr.cpp.
3347 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
3348 llvm::Type *RealVarTy, SourceLocation Loc) {
3349 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
3351 CGF, VD, Addr, Loc);
3352 else
3353 Addr =
3354 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
3355
3356 Addr = Addr.withElementType(RealVarTy);
3358}
3359
/// For a variable marked 'declare target', returns the address of the
/// device-side reference pointer (loaded through EmitLoadOfPointer), or an
/// invalid Address when the global can be accessed directly in the device
/// image (MT_Local, or MT_To/MT_Enter without unified shared memory).
// NOTE(review): the extraction dropped lines 3360 (the start of the
// signature — presumably static Address emitDeclTargetVarDeclLValue(
// CodeGenFunction &CGF, ...)), 3371/3376 (the unified-shared-memory
// conditions) and 3379 (the initialization of 'Addr'); confirm against
// upstream CGExpr.cpp.
3361 const VarDecl *VD, QualType T) {
3362 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3363 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
3364 // Always return an invalid address for MT_Local, and also for
3365 // MT_To/MT_Enter when unified memory is not enabled. These use direct
3366 // access (global exists in device image). Otherwise, return a valid
3367 // address.
3368 if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Local ||
3369 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3370 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3372 return Address::invalid();
3373 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
3374 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3375 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3377 "Expected link clause OR to clause with unified memory enabled.");
3378 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
3380 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
3381}
3382
3383Address
/// Loads the pointer held by a reference l-value and wraps it as a
/// naturally-aligned Address of the pointee. The load is decorated with
/// TBAA metadata, plus !nonnull and !align where the pointee type permits.
/// On return, *PointeeBaseInfo / *PointeeTBAAInfo (if provided) describe
/// the pointee access.
// NOTE(review): line 3384 with the rest of the signature was dropped by
// the extraction — upstream: CodeGenFunction::EmitLoadOfReference(LValue
// RefLVal, ...); confirm against CGExpr.cpp.
3385 LValueBaseInfo *PointeeBaseInfo,
3386 TBAAAccessInfo *PointeeTBAAInfo) {
3387 llvm::LoadInst *Load =
3388 Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
3389 CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
3390 QualType PTy = RefLVal.getType()->getPointeeType();
3391 CharUnits Align = CGM.getNaturalTypeAlignment(
3392 PTy, PointeeBaseInfo, PointeeTBAAInfo, /*ForPointeeType=*/true);
// Metadata is only emitted for complete pointee types, where nullability
// and alignment can be reasoned about.
3393 if (!PTy->isIncompleteType()) {
3394 llvm::LLVMContext &Ctx = getLLVMContext();
3395 llvm::MDBuilder MDB(Ctx);
3396 // Emit !nonnull metadata
3397 if (CGM.getTypes().getTargetAddressSpace(PTy) == 0 &&
3398 !CGM.getCodeGenOpts().NullPointerIsValid)
3399 Load->setMetadata(llvm::LLVMContext::MD_nonnull,
3400 llvm::MDNode::get(Ctx, {}));
3401 // Emit !align metadata
3402 if (PTy->isObjectType()) {
3403 auto AlignVal = Align.getQuantity();
3404 if (AlignVal > 1) {
3405 Load->setMetadata(
3406 llvm::LLVMContext::MD_align,
3407 llvm::MDNode::get(Ctx, MDB.createConstant(llvm::ConstantInt::get(
3408 Builder.getInt64Ty(), AlignVal))));
3409 }
3410 }
3411 }
3412 return makeNaturalAddressForPointer(Load, PTy, Align,
3413 /*ForPointeeType=*/true, PointeeBaseInfo,
3414 PointeeTBAAInfo);
3415}
3416
/// Convenience wrapper: loads through a reference l-value and packages the
/// pointee address (with its base/TBAA info) as a new l-value.
// NOTE(review): line 3417 with the signature was dropped by the
// extraction — upstream: CodeGenFunction::EmitLoadOfReferenceLValue(LValue
// RefLVal); confirm against CGExpr.cpp.
3418 LValueBaseInfo PointeeBaseInfo;
3419 TBAAAccessInfo PointeeTBAAInfo;
3420 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
3421 &PointeeTBAAInfo);
3422 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
3423 PointeeBaseInfo, PointeeTBAAInfo);
3424}
3425
/// Loads a pointer value from memory and returns a naturally-aligned
/// Address of the pointee, filling *BaseInfo / *TBAAInfo if provided.
// NOTE(review): line 3426 with the start of the signature was dropped by
// the extraction — upstream: Address CodeGenFunction::EmitLoadOfPointer(
// Address Ptr, ...); confirm against CGExpr.cpp.
3427 const PointerType *PtrTy,
3428 LValueBaseInfo *BaseInfo,
3429 TBAAAccessInfo *TBAAInfo) {
3430 llvm::Value *Addr = Builder.CreateLoad(Ptr);
3431 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
3432 CharUnits(), /*ForPointeeType=*/true,
3433 BaseInfo, TBAAInfo);
3434}
3435
/// Convenience wrapper: loads a pointer and packages the pointee address
/// (with its base/TBAA info) as an l-value.
// NOTE(review): line 3436 with the start of the signature was dropped by
// the extraction — upstream: LValue CodeGenFunction::EmitLoadOfPointerLValue(
// Address PtrAddr, ...); confirm against CGExpr.cpp.
3437 const PointerType *PtrTy) {
3438 LValueBaseInfo BaseInfo;
3439 TBAAAccessInfo TBAAInfo;
3440 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
3441 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
3442}
3443
/// Emits an l-value for a reference to a global variable, handling the
/// special cases first: dynamic-TLS wrapper calls, OpenMP 'declare target'
/// indirection on device, and OpenMP threadprivate copies.
// NOTE(review): the extraction dropped lines 3444 (the start of the
// signature — upstream: static LValue EmitGlobalVarDeclLValue(
// CodeGenFunction &CGF, ...)), 3450 (the second half of the TLS_Dynamic
// condition), 3455/3457 (the declare-target Addr initialization and its
// return), and 3476-3478 (the two branches of the reference-type ternary);
// confirm against CGExpr.cpp.
3445 const Expr *E, const VarDecl *VD) {
3446 QualType T = E->getType();
3447
3448 // If it's thread_local, emit a call to its wrapper function instead.
3449 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
3451 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
3452 // Check if the variable is marked as declare target with link clause in
3453 // device codegen.
3454 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
3456 if (Addr.isValid())
3458 }
3459
3460 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
3461
3462 if (VD->getTLSKind() != VarDecl::TLS_None)
3463 V = CGF.Builder.CreateThreadLocalAddress(V);
3464
3465 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
3466 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
3467 Address Addr(V, RealVarTy, Alignment);
3468 // Emit reference to the private copy of the variable if it is an OpenMP
3469 // threadprivate variable.
3470 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
3471 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3472 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
3473 E->getExprLoc());
3474 }
3475 LValue LV = VD->getType()->isReferenceType() ?
3479 setObjCGCLValueClass(CGF.getContext(), E, LV);
3480 return LV;
3481}
3482
/// Returns the raw function pointer constant for GD: the weakref aliasee
/// when the declaration carries __attribute__((weakref)), otherwise the
/// ordinary function address via GetAddrOfFunction.
// NOTE(review): the extraction dropped lines 3483 (the start of the
// signature — this appears to be a CodeGenModule member, presumably
// getRawFunctionPointer(GlobalDecl GD, llvm::Type *Ty)) and 3487 (the
// initialization of 'aliasee', presumably via GetWeakRefReference);
// confirm against upstream CGExpr.cpp.
3484 llvm::Type *Ty) {
3485 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3486 if (FD->hasAttr<WeakRefAttr>()) {
3488 return aliasee.getPointer();
3489 }
3490
3491 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
3492 return V;
3493}
3494
/// Emits an l-value referring to the function named by GD, using the
/// declaration's alignment. The pointer is wrapped in NoCFIValue in the
/// (elided) conditional branch below.
// NOTE(review): line 3500 (the condition guarding the NoCFIValue block)
// was dropped by the extraction; confirm against upstream CGExpr.cpp.
3495static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
3496 GlobalDecl GD) {
3497 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3498 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
3499 QualType ETy = E->getType();
3501 if (auto *GV = dyn_cast<llvm::GlobalValue>(V))
3502 V = llvm::NoCFIValue::get(GV);
3503 }
3504 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
3505 return CGF.MakeAddrLValue(V, ETy, Alignment, AlignmentSource::Decl);
3506}
3507
/// Thin wrapper that forwards to EmitLValueForLambdaField to produce the
/// l-value for a captured field accessed through ThisValue.
// NOTE(review): line 3508 with the start of the signature was dropped by
// the extraction — presumably static LValue EmitCapturedFieldLValue(
// CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue);
// confirm against upstream CGExpr.cpp.
3509 llvm::Value *ThisValue) {
3510
3511 return CGF.EmitLValueForLambdaField(FD, ThisValue);
3512}
3513
3514/// Named Registers are named metadata pointing to the register name
3515/// which will be read from/written to as an argument to the intrinsic
3516/// @llvm.read/write_register.
3517/// So far, only the name is being passed down, but other options such as
3518/// register type, allocation type or even optimization options could be
3519/// passed down via the metadata node.
/// Builds (or reuses) a module-level named-metadata node
/// "llvm.named.register.<label>" carrying the register's asm label, and
/// returns a global-register l-value whose "pointer" is that metadata.
3520static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
3521 SmallString<64> Name("llvm.named.register.");
3522 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
3523 assert(Asm->getLabel().size() < 64-Name.size() &&
3524 "Register name too big");
3525 Name.append(Asm->getLabel());
3526 llvm::NamedMDNode *M =
3527 CGM.getModule().getOrInsertNamedMetadata(Name);
// Populate the node only on first use; subsequent references to the same
// register reuse the existing single operand.
3528 if (M->getNumOperands() == 0) {
3529 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
3530 Asm->getLabel());
3531 llvm::Metadata *Ops[] = {Str};
3532 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
3533 }
3534
3535 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
3536
3537 llvm::Value *Ptr =
3538 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
3539 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
3540}
3541
3542/// Determine whether we can emit a reference to \p VD from the current
3543/// context, despite not necessarily having seen an odr-use of the variable in
3544/// this context.
// NOTE(review): the extraction dropped lines 3545-3546 (the start of the
// signature — per the call site below, this is
// canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, const
// DeclRefExpr *E, const VarDecl *VD)) and 3552 (the condition guarding the
// first "return false", presumably the enclosing-scope/capture check);
// confirm against upstream CGExpr.cpp.
3546 const DeclRefExpr *E,
3547 const VarDecl *VD) {
3548 // For a variable declared in an enclosing scope, do not emit a spurious
3549 // reference even if we have a capture, as that will emit an unwarranted
3550 // reference to our capture state, and will likely generate worse code than
3551 // emitting a local copy.
3553 return false;
3554
3555 // For a local declaration declared in this function, we can always reference
3556 // it even if we don't have an odr-use.
3557 if (VD->hasLocalStorage()) {
3558 return VD->getDeclContext() ==
3559 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
3560 }
3561
3562 // For a global declaration, we can emit a reference to it if we know
3563 // for sure that we are able to emit a definition of it.
3564 VD = VD->getDefinition(CGF.getContext());
3565 if (!VD)
3566 return false;
3567
3568 // Don't emit a spurious reference if it might be to a variable that only
3569 // exists on a different device / target.
3570 // FIXME: This is unnecessarily broad. Check whether this would actually be a
3571 // cross-target reference.
3572 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
3573 CGF.getLangOpts().OpenCL) {
3574 return false;
3575 }
3576
3577 // We can emit a spurious reference only if the linkage implies that we'll
3578 // be emitting a non-interposable symbol that will be retained until link
3579 // time.
3580 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3581 case llvm::GlobalValue::ExternalLinkage:
3582 case llvm::GlobalValue::LinkOnceODRLinkage:
3583 case llvm::GlobalValue::WeakODRLinkage:
3584 case llvm::GlobalValue::InternalLinkage:
3585 case llvm::GlobalValue::PrivateLinkage:
3586 return true;
3587 default:
3588 return false;
3589 }
3590}
3591
3593 const NamedDecl *ND = E->getDecl();
3594 QualType T = E->getType();
3595
3596 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3597 "should not emit an unevaluated operand");
3598
3599 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3600 // Global Named registers access via intrinsics only
3601 if (VD->getStorageClass() == SC_Register &&
3602 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3603 return EmitGlobalNamedRegister(VD, CGM);
3604
3605 // If this DeclRefExpr does not constitute an odr-use of the variable,
3606 // we're not permitted to emit a reference to it in general, and it might
3607 // not be captured if capture would be necessary for a use. Emit the
3608 // constant value directly instead.
3609 if (E->isNonOdrUse() == NOUR_Constant &&
3610 (VD->getType()->isReferenceType() ||
3611 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3612 VD->getAnyInitializer(VD);
3613 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3614 E->getLocation(), *VD->evaluateValue(), VD->getType());
3615 assert(Val && "failed to emit constant expression");
3616
3618 if (!VD->getType()->isReferenceType()) {
3619 // Spill the constant value to a global.
3620 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3621 getContext().getDeclAlign(VD));
3622 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3623 auto *PTy = llvm::PointerType::get(
3624 getLLVMContext(), getTypes().getTargetAddressSpace(VD->getType()));
3625 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3626 } else {
3627 // Should we be using the alignment of the constant pointer we emitted?
3628 CharUnits Alignment =
3629 CGM.getNaturalTypeAlignment(E->getType(),
3630 /* BaseInfo= */ nullptr,
3631 /* TBAAInfo= */ nullptr,
3632 /* forPointeeType= */ true);
3633 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3634 }
3636 }
3637
3638 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3639
3640 // Check for captured variables.
3642 VD = VD->getCanonicalDecl();
3643 if (auto *FD = LambdaCaptureFields.lookup(VD))
3644 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3645 if (CapturedStmtInfo) {
3646 auto I = LocalDeclMap.find(VD);
3647 if (I != LocalDeclMap.end()) {
3648 LValue CapLVal;
3649 if (VD->getType()->isReferenceType())
3650 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3652 else
3653 CapLVal = MakeAddrLValue(I->second, T);
3654 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3655 // in simd context.
3656 if (getLangOpts().OpenMP &&
3657 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3658 CapLVal.setNontemporal(/*Value=*/true);
3659 return CapLVal;
3660 }
3661 LValue CapLVal =
3662 EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
3663 CapturedStmtInfo->getContextValue());
3664 Address LValueAddress = CapLVal.getAddress();
3665 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3666 LValueAddress.getElementType(),
3667 getContext().getDeclAlign(VD)),
3668 CapLVal.getType(),
3670 CapLVal.getTBAAInfo());
3671 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3672 // in simd context.
3673 if (getLangOpts().OpenMP &&
3674 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3675 CapLVal.setNontemporal(/*Value=*/true);
3676 return CapLVal;
3677 }
3678
3679 assert(isa<BlockDecl>(CurCodeDecl));
3680 Address addr = GetAddrOfBlockDecl(VD);
3681 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3682 }
3683 }
3684
3685 // FIXME: We should be able to assert this for FunctionDecls as well!
3686 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3687 // those with a valid source location.
3688 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3689 !E->getLocation().isValid()) &&
3690 "Should not use decl without marking it used!");
3691
3692 if (ND->hasAttr<WeakRefAttr>()) {
3693 const auto *VD = cast<ValueDecl>(ND);
3694 ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
3695 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3696 }
3697
3698 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3699 // Check if this is a global variable.
3700 if (VD->hasLinkage() || VD->isStaticDataMember())
3701 return EmitGlobalVarDeclLValue(*this, E, VD);
3702
3703 Address addr = Address::invalid();
3704
3705 // The variable should generally be present in the local decl map.
3706 auto iter = LocalDeclMap.find(VD);
3707 if (iter != LocalDeclMap.end()) {
3708 addr = iter->second;
3709
3710 // Otherwise, it might be static local we haven't emitted yet for
3711 // some reason; most likely, because it's in an outer function.
3712 } else if (VD->isStaticLocal()) {
3713 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3714 *VD, CGM.getLLVMLinkageVarDefinition(VD));
3715 addr = Address(
3716 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3717
3718 // No other cases for now.
3719 } else {
3720 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3721 }
3722
3723 // Handle threadlocal function locals.
3724 if (VD->getTLSKind() != VarDecl::TLS_None)
3725 addr = addr.withPointer(
3726 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3728
3729 // Check for OpenMP threadprivate variables.
3730 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3731 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3733 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3734 E->getExprLoc());
3735 }
3736
3737 // Drill into block byref variables.
3738 bool isBlockByref = VD->isEscapingByref();
3739 if (isBlockByref) {
3740 addr = emitBlockByrefAddress(addr, VD);
3741 }
3742
3743 // Drill into reference types.
3744 LValue LV = VD->getType()->isReferenceType() ?
3747
3748 bool isLocalStorage = VD->hasLocalStorage();
3749
3750 bool NonGCable = isLocalStorage &&
3751 !VD->getType()->isReferenceType() &&
3752 !isBlockByref;
3753 if (NonGCable) {
3755 LV.setNonGC(true);
3756 }
3757
3758 bool isImpreciseLifetime =
3759 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3760 if (isImpreciseLifetime)
3763 return LV;
3764 }
3765
3766 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3767 return EmitFunctionDeclLValue(*this, E, FD);
3768
3769 // FIXME: While we're emitting a binding from an enclosing scope, all other
3770 // DeclRefExprs we see should be implicitly treated as if they also refer to
3771 // an enclosing scope.
3772 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3774 auto *FD = LambdaCaptureFields.lookup(BD);
3775 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3776 }
3777 // Suppress debug location updates when visiting the binding, since the
3778 // binding may emit instructions that would otherwise be associated with the
3779 // binding itself, rather than the expression referencing the binding. (this
3780 // leads to jumpy debug stepping behavior where the location/debugger jump
3781 // back to the binding declaration, then back to the expression referencing
3782 // the binding)
3784 return EmitLValue(BD->getBinding(), NotKnownNonNull);
3785 }
3786
3787 // We can form DeclRefExprs naming GUID declarations when reconstituting
3788 // non-type template parameters into expressions.
3789 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3790 return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
3792
3793 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3794 ConstantAddress ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3795 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3796
3797 if (AS != T.getAddressSpace()) {
3798 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3799 llvm::Type *PtrTy =
3800 llvm::PointerType::get(CGM.getLLVMContext(), TargetAS);
3801 llvm::Constant *ASC = CGM.performAddrSpaceCast(ATPO.getPointer(), PtrTy);
3802 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3803 }
3804
3805 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3806 }
3807
3808 llvm_unreachable("Unhandled DeclRefExpr");
3809}
3810
3812 // __extension__ doesn't affect lvalue-ness.
3813 if (E->getOpcode() == UO_Extension)
3814 return EmitLValue(E->getSubExpr());
3815
3817 switch (E->getOpcode()) {
3818 default: llvm_unreachable("Unknown unary operator lvalue!");
3819 case UO_Deref: {
3821 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3822
3823 LValueBaseInfo BaseInfo;
3824 TBAAAccessInfo TBAAInfo;
3826 &TBAAInfo);
3827 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3829
3830 // We should not generate __weak write barrier on indirect reference
3831 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3832 // But, we continue to generate __strong write barrier on indirect write
3833 // into a pointer to object.
3834 if (getLangOpts().ObjC &&
3835 getLangOpts().getGC() != LangOptions::NonGC &&
3836 LV.isObjCWeak())
3838 return LV;
3839 }
3840 case UO_Real:
3841 case UO_Imag: {
3842 LValue LV = EmitLValue(E->getSubExpr());
3843 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3844
3845 // __real is valid on scalars. This is a faster way of testing that.
3846 // __imag can only produce an rvalue on scalars.
3847 if (E->getOpcode() == UO_Real &&
3848 !LV.getAddress().getElementType()->isStructTy()) {
3849 assert(E->getSubExpr()->getType()->isArithmeticType());
3850 return LV;
3851 }
3852
3853 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3854
3855 Address Component =
3856 (E->getOpcode() == UO_Real
3859 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3860 CGM.getTBAAInfoForSubobject(LV, T));
3861 ElemLV.getQuals().addQualifiers(LV.getQuals());
3862 return ElemLV;
3863 }
3864 case UO_PreInc:
3865 case UO_PreDec: {
3866 LValue LV = EmitLValue(E->getSubExpr());
3867 bool isInc = E->getOpcode() == UO_PreInc;
3868
3869 if (E->getType()->isAnyComplexType())
3870 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3871 else
3872 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3873 return LV;
3874 }
3875 }
3876}
3877
3879 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
3881}
3882
3884 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
3886}
3887
3889 auto SL = E->getFunctionName();
3890 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3891 StringRef FnName = CurFn->getName();
3892 FnName.consume_front("\01");
3893 StringRef NameItems[] = {
3895 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3896 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3897 std::string Name = std::string(SL->getString());
3898 if (!Name.empty()) {
3899 unsigned Discriminator =
3900 CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3901 if (Discriminator)
3902 Name += "_" + Twine(Discriminator + 1).str();
3903 auto C = CGM.GetAddrOfConstantCString(Name, GVName);
3905 } else {
3906 auto C = CGM.GetAddrOfConstantCString(std::string(FnName), GVName);
3908 }
3909 }
3910 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3912}
3913
3914/// Emit a type description suitable for use by a runtime sanitizer library. The
3915/// format of a type descriptor is
3916///
3917/// \code
3918/// { i16 TypeKind, i16 TypeInfo }
3919/// \endcode
3920///
3921/// followed by an array of i8 containing the type name with extra information
3922/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3923/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3924/// anything else.
3926 // Only emit each type's descriptor once.
3927 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3928 return C;
3929
3930 uint16_t TypeKind = TK_Unknown;
3931 uint16_t TypeInfo = 0;
3932 bool IsBitInt = false;
3933
3934 if (T->isIntegerType()) {
3935 TypeKind = TK_Integer;
3936 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3937 (T->isSignedIntegerType() ? 1 : 0);
3938 // Follow suggestion from discussion of issue 64100.
3939 // So we can write the exact amount of bits in TypeName after '\0'
3940 // making it <diagnostic-like type name>.'\0'.<32-bit width>.
3941 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3942 // Do a sanity checks as we are using 32-bit type to store bit length.
3943 assert(getContext().getTypeSize(T) > 0 &&
3944 " non positive amount of bits in __BitInt type");
3945 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3946 " too many bits in __BitInt type");
3947
3948 // Redefine TypeKind with the actual __BitInt type if we have signed
3949 // BitInt.
3950 TypeKind = TK_BitInt;
3951 IsBitInt = true;
3952 }
3953 } else if (T->isFloatingType()) {
3954 TypeKind = TK_Float;
3956 }
3957
3958 // Format the type name as if for a diagnostic, including quotes and
3959 // optionally an 'aka'.
3960 SmallString<32> Buffer;
3961 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
3962 (intptr_t)T.getAsOpaquePtr(), StringRef(),
3963 StringRef(), {}, Buffer, {});
3964
3965 if (IsBitInt) {
3966 // The Structure is: 0 to end the string, 32 bit unsigned integer in target
3967 // endianness, zero.
3968 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3969 const auto *EIT = T->castAs<BitIntType>();
3970 uint32_t Bits = EIT->getNumBits();
3971 llvm::support::endian::write32(S + 1, Bits,
3972 getTarget().isBigEndian()
3973 ? llvm::endianness::big
3974 : llvm::endianness::little);
3975 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3976 Buffer.append(Str);
3977 }
3978
3979 llvm::Constant *Components[] = {
3980 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3981 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3982 };
3983 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3984
3985 auto *GV = new llvm::GlobalVariable(
3986 CGM.getModule(), Descriptor->getType(),
3987 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3988 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3989 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3990
3991 // Remember the descriptor for this type.
3992 CGM.setTypeDescriptorInMap(T, GV);
3993
3994 return GV;
3995}
3996
3997llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3998 llvm::Type *TargetTy = IntPtrTy;
3999
4000 if (V->getType() == TargetTy)
4001 return V;
4002
4003 // Floating-point types which fit into intptr_t are bitcast to integers
4004 // and then passed directly (after zero-extension, if necessary).
4005 if (V->getType()->isFloatingPointTy()) {
4006 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
4007 if (Bits <= TargetTy->getIntegerBitWidth())
4008 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
4009 Bits));
4010 }
4011
4012 // Integers which fit in intptr_t are zero-extended and passed directly.
4013 if (V->getType()->isIntegerTy() &&
4014 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
4015 return Builder.CreateZExt(V, TargetTy);
4016
4017 // Pointers are passed directly, everything else is passed by address.
4018 if (!V->getType()->isPointerTy()) {
4019 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
4020 Builder.CreateStore(V, Ptr);
4021 V = Ptr.getPointer();
4022 }
4023 return Builder.CreatePtrToInt(V, TargetTy);
4024}
4025
4026/// Emit a representation of a SourceLocation for passing to a handler
4027/// in a sanitizer runtime library. The format for this data is:
4028/// \code
4029/// struct SourceLocation {
4030/// const char *Filename;
4031/// int32_t Line, Column;
4032/// };
4033/// \endcode
4034/// For an invalid SourceLocation, the Filename pointer is null.
4036 llvm::Constant *Filename;
4037 int Line, Column;
4038
4040 if (PLoc.isValid()) {
4041 StringRef FilenameString = PLoc.getFilename();
4042
4043 int PathComponentsToStrip =
4044 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
4045 if (PathComponentsToStrip < 0) {
4046 assert(PathComponentsToStrip != INT_MIN);
4047 int PathComponentsToKeep = -PathComponentsToStrip;
4048 auto I = llvm::sys::path::rbegin(FilenameString);
4049 auto E = llvm::sys::path::rend(FilenameString);
4050 while (I != E && --PathComponentsToKeep)
4051 ++I;
4052
4053 FilenameString = FilenameString.substr(I - E);
4054 } else if (PathComponentsToStrip > 0) {
4055 auto I = llvm::sys::path::begin(FilenameString);
4056 auto E = llvm::sys::path::end(FilenameString);
4057 while (I != E && PathComponentsToStrip--)
4058 ++I;
4059
4060 if (I != E)
4061 FilenameString =
4062 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
4063 else
4064 FilenameString = llvm::sys::path::filename(FilenameString);
4065 }
4066
4067 auto FilenameGV =
4068 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
4069 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
4071 FilenameGV.getPointer()->stripPointerCasts()));
4072 Filename = FilenameGV.getPointer();
4073 Line = PLoc.getLine();
4074 Column = PLoc.getColumn();
4075 } else {
4076 Filename = llvm::Constant::getNullValue(Int8PtrTy);
4077 Line = Column = 0;
4078 }
4079
4080 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
4081 Builder.getInt32(Column)};
4082
4083 return llvm::ConstantStruct::getAnon(Data);
4084}
4085
namespace {
/// Specify under what conditions this check can be recovered
enum class CheckRecoverableKind {
  /// Always terminate program execution if this check fails.
  Unrecoverable,
  /// Check supports recovering, runtime has both fatal (noreturn) and
  /// non-fatal handlers for this check.
  Recoverable,
  /// Runtime conditionally aborts, always need to support recovery.
  AlwaysRecoverable
};
}
4098
4099static CheckRecoverableKind
4101 if (Ordinal == SanitizerKind::SO_Vptr)
4102 return CheckRecoverableKind::AlwaysRecoverable;
4103 else if (Ordinal == SanitizerKind::SO_Return ||
4104 Ordinal == SanitizerKind::SO_Unreachable)
4105 return CheckRecoverableKind::Unrecoverable;
4106 else
4107 return CheckRecoverableKind::Recoverable;
4108}
4109
namespace {
/// Name and ABI version of a UBSan runtime handler; used to build the
/// "__ubsan_handle_<name>[_vN]" symbol for each check kind.
struct SanitizerHandlerInfo {
  char const *const Name;
  unsigned Version;
};
}
4116
4117const SanitizerHandlerInfo SanitizerHandlers[] = {
4118#define SANITIZER_CHECK(Enum, Name, Version, Msg) {#Name, Version},
4120#undef SANITIZER_CHECK
4121};
4122
4124 llvm::FunctionType *FnType,
4126 SanitizerHandler CheckHandler,
4127 CheckRecoverableKind RecoverKind, bool IsFatal,
4128 llvm::BasicBlock *ContBB, bool NoMerge) {
4129 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
4130 std::optional<ApplyDebugLocation> DL;
4131 if (!CGF.Builder.getCurrentDebugLocation()) {
4132 // Ensure that the call has at least an artificial debug location.
4133 DL.emplace(CGF, SourceLocation());
4134 }
4135 bool NeedsAbortSuffix =
4136 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
4137 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
4138 bool HandlerPreserveAllRegs =
4139 CGF.CGM.getCodeGenOpts().SanitizeHandlerPreserveAllRegs;
4140 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
4141 const StringRef CheckName = CheckInfo.Name;
4142 std::string FnName = "__ubsan_handle_" + CheckName.str();
4143 if (CheckInfo.Version && !MinimalRuntime)
4144 FnName += "_v" + llvm::utostr(CheckInfo.Version);
4145 if (MinimalRuntime)
4146 FnName += "_minimal";
4147 if (NeedsAbortSuffix)
4148 FnName += "_abort";
4149 if (HandlerPreserveAllRegs && !NeedsAbortSuffix)
4150 FnName += "_preserve";
4151 bool MayReturn =
4152 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
4153
4154 llvm::AttrBuilder B(CGF.getLLVMContext());
4155 if (!MayReturn) {
4156 B.addAttribute(llvm::Attribute::NoReturn)
4157 .addAttribute(llvm::Attribute::NoUnwind);
4158 }
4159 B.addUWTableAttr(llvm::UWTableKind::Default);
4160
4161 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
4162 FnType, FnName,
4163 llvm::AttributeList::get(CGF.getLLVMContext(),
4164 llvm::AttributeList::FunctionIndex, B),
4165 /*Local=*/true);
4166 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
4167 NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().isOptimizedBuild() ||
4168 (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
4169 if (NoMerge)
4170 HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
4171 if (HandlerPreserveAllRegs && !NeedsAbortSuffix) {
4172 // N.B. there is also a clang::CallingConv which is not what we want here.
4173 HandlerCall->setCallingConv(llvm::CallingConv::PreserveAll);
4174 }
4175 if (!MayReturn) {
4176 HandlerCall->setDoesNotReturn();
4177 CGF.Builder.CreateUnreachable();
4178 } else {
4179 CGF.Builder.CreateBr(ContBB);
4180 }
4181}
4182
4184 ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
4185 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
4186 ArrayRef<llvm::Value *> DynamicArgs, const TrapReason *TR) {
4187 assert(IsSanitizerScope);
4188 assert(Checked.size() > 0);
4189 assert(CheckHandler >= 0 &&
4190 size_t(CheckHandler) < std::size(SanitizerHandlers));
4191 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
4192
4193 llvm::Value *FatalCond = nullptr;
4194 llvm::Value *RecoverableCond = nullptr;
4195 llvm::Value *TrapCond = nullptr;
4196 bool NoMerge = false;
4197 // Expand checks into:
4198 // (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ...
4199 // We need separate allow_ubsan_check intrinsics because they have separately
4200 // specified cutoffs.
4201 // This expression looks expensive but will be simplified after
4202 // LowerAllowCheckPass.
4203 for (auto &[Check, Ord] : Checked) {
4204 llvm::Value *GuardedCheck = Check;
4206 (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] > 0)) {
4207 llvm::Value *Allow = Builder.CreateCall(
4208 CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
4209 llvm::ConstantInt::get(CGM.Int8Ty, Ord));
4210 GuardedCheck = Builder.CreateOr(Check, Builder.CreateNot(Allow));
4211 }
4212
4213 // -fsanitize-trap= overrides -fsanitize-recover=.
4214 llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
4215 : CGM.getCodeGenOpts().SanitizeRecover.has(Ord)
4216 ? RecoverableCond
4217 : FatalCond;
4218 Cond = Cond ? Builder.CreateAnd(Cond, GuardedCheck) : GuardedCheck;
4219
4220 if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Ord))
4221 NoMerge = true;
4222 }
4223
4224 if (TrapCond)
4225 EmitTrapCheck(TrapCond, CheckHandler, NoMerge, TR);
4226 if (!FatalCond && !RecoverableCond)
4227 return;
4228
4229 llvm::Value *JointCond;
4230 if (FatalCond && RecoverableCond)
4231 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
4232 else
4233 JointCond = FatalCond ? FatalCond : RecoverableCond;
4234 assert(JointCond);
4235
4236 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
4237 assert(SanOpts.has(Checked[0].second));
4238#ifndef NDEBUG
4239 for (int i = 1, n = Checked.size(); i < n; ++i) {
4240 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
4241 "All recoverable kinds in a single check must be same!");
4242 assert(SanOpts.has(Checked[i].second));
4243 }
4244#endif
4245
4246 llvm::BasicBlock *Cont = createBasicBlock("cont");
4247 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
4248 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
4249 // Give hint that we very much don't expect to execute the handler
4250 llvm::MDBuilder MDHelper(getLLVMContext());
4251 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
4252 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
4253 EmitBlock(Handlers);
4254
4255 // Clear arguments for the MinimalRuntime handler.
4256 if (CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
4257 StaticArgs = {};
4258 DynamicArgs = {};
4259 }
4260
4261 // Handler functions take an i8* pointing to the (handler-specific) static
4262 // information block, followed by a sequence of intptr_t arguments
4263 // representing operand values.
4266
4267 Args.reserve(DynamicArgs.size() + 1);
4268 ArgTypes.reserve(DynamicArgs.size() + 1);
4269
4270 // Emit handler arguments and create handler function type.
4271 if (!StaticArgs.empty()) {
4272 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
4273 auto *InfoPtr = new llvm::GlobalVariable(
4274 CGM.getModule(), Info->getType(),
4275 // Non-constant global is used in a handler to deduplicate reports.
4276 // TODO: change deduplication logic and make it constant.
4277 /*isConstant=*/false, llvm::GlobalVariable::PrivateLinkage, Info, "",
4278 nullptr, llvm::GlobalVariable::NotThreadLocal,
4279 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
4280 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4281 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
4282 Args.push_back(InfoPtr);
4283 ArgTypes.push_back(Args.back()->getType());
4284 }
4285
4286 for (llvm::Value *DynamicArg : DynamicArgs) {
4287 Args.push_back(EmitCheckValue(DynamicArg));
4288 ArgTypes.push_back(IntPtrTy);
4289 }
4290
4291 llvm::FunctionType *FnType =
4292 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
4293
4294 if (!FatalCond || !RecoverableCond) {
4295 // Simple case: we need to generate a single handler call, either
4296 // fatal, or non-fatal.
4297 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
4298 (FatalCond != nullptr), Cont, NoMerge);
4299 } else {
4300 // Emit two handler calls: first one for set of unrecoverable checks,
4301 // another one for recoverable.
4302 llvm::BasicBlock *NonFatalHandlerBB =
4303 createBasicBlock("non_fatal." + CheckName);
4304 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
4305 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
4306 EmitBlock(FatalHandlerBB);
4307 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
4308 NonFatalHandlerBB, NoMerge);
4309 EmitBlock(NonFatalHandlerBB);
4310 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
4311 Cont, NoMerge);
4312 }
4313
4314 EmitBlock(Cont);
4315}
4316
4318 SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
4319 llvm::ConstantInt *TypeId, llvm::Value *Ptr,
4320 ArrayRef<llvm::Constant *> StaticArgs) {
4321 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
4322
4323 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
4324 llvm::CondBrInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
4325
4326 llvm::MDBuilder MDHelper(getLLVMContext());
4327 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
4328 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
4329
4330 EmitBlock(CheckBB);
4331
4332 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);
4333
4334 llvm::CallInst *CheckCall;
4335 llvm::FunctionCallee SlowPathFn;
4336 if (WithDiag) {
4337 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
4338 auto *InfoPtr =
4339 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
4340 llvm::GlobalVariable::PrivateLinkage, Info);
4341 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4342 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
4343
4344 SlowPathFn = CGM.getModule().getOrInsertFunction(
4345 "__cfi_slowpath_diag",
4346 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
4347 false));
4348 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
4349 } else {
4350 SlowPathFn = CGM.getModule().getOrInsertFunction(
4351 "__cfi_slowpath",
4352 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
4353 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
4354 }
4355
4356 CGM.setDSOLocal(
4357 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
4358 CheckCall->setDoesNotThrow();
4359
4360 EmitBlock(Cont);
4361}
4362
4363// Emit a stub for __cfi_check function so that the linker knows about this
4364// symbol in LTO mode.
4366 llvm::Module *M = &CGM.getModule();
4367 ASTContext &C = getContext();
4368 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
4369
4370 auto *ArgCallsiteTypeId =
4372 auto *ArgAddr =
4374 auto *ArgCFICheckFailData =
4376 FunctionArgList FnArgs{ArgCallsiteTypeId, ArgAddr, ArgCFICheckFailData};
4377 const CGFunctionInfo &FI =
4378 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
4379
4380 llvm::Function *F = llvm::Function::Create(
4381 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
4382 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
4383 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
4384 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
4385 F->setAlignment(llvm::Align(4096));
4386 CGM.setDSOLocal(F);
4387
4388 llvm::LLVMContext &Ctx = M->getContext();
4389 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
4390 // CrossDSOCFI pass is not executed if there is no executable code.
4391 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
4392 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
4393 llvm::ReturnInst::Create(Ctx, nullptr, BB);
4394}
4395
4396// This function is basically a switch over the CFI failure kind, which is
4397// extracted from CFICheckFailData (1st function argument). Each case is either
4398// llvm.trap or a call to one of the two runtime handlers, based on
4399// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
4400// failure kind) traps, but this should really never happen. CFICheckFailData
4401// can be nullptr if the calling module has -fsanitize-trap behavior for this
4402// check kind; in this case __cfi_check_fail traps as well.
4404 auto CheckHandler = SanitizerHandler::CFICheckFail;
4405 // TODO: the SanitizerKind is not yet determined for this check (and might
4406 // not even be available, if Data == nullptr). However, we still want to
4407 // annotate the instrumentation. We approximate this by using all the CFI
4408 // kinds.
4409 SanitizerDebugLocation SanScope(
4410 this,
4411 {SanitizerKind::SO_CFIVCall, SanitizerKind::SO_CFINVCall,
4412 SanitizerKind::SO_CFIDerivedCast, SanitizerKind::SO_CFIUnrelatedCast,
4413 SanitizerKind::SO_CFIICall},
4414 CheckHandler);
4415 auto *ArgData = ImplicitParamDecl::Create(
4417 auto *ArgAddr = ImplicitParamDecl::Create(
4419
4420 FunctionArgList Args{ArgData, ArgAddr};
4421 const CGFunctionInfo &FI =
4422 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
4423
4424 llvm::Function *F = llvm::Function::Create(
4425 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
4426 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
4427
4428 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
4429 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
4430 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
4431
4432 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
4433 SourceLocation());
4434
4436
4437 // This function is not affected by NoSanitizeList. This function does
4438 // not have a source location, but "src:*" would still apply. Revert any
4439 // changes to SanOpts made in StartFunction.
4440 SanOpts = CGM.getLangOpts().Sanitize;
4441
4442 llvm::Value *Data =
4443 EmitLoadOfScalar(GetAddrOfLocalVar(ArgData), /*Volatile=*/false,
4444 CGM.getContext().VoidPtrTy, ArgData->getLocation());
4445 llvm::Value *Addr =
4446 EmitLoadOfScalar(GetAddrOfLocalVar(ArgAddr), /*Volatile=*/false,
4447 CGM.getContext().VoidPtrTy, ArgAddr->getLocation());
4448
4449 // Data == nullptr means the calling module has trap behaviour for this check.
4450 llvm::Value *DataIsNotNullPtr =
4451 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
4452 // TODO: since there is no data, we don't know the CheckKind, and therefore
4453 // cannot inspect CGM.getCodeGenOpts().SanitizeMergeHandlers. We default to
4454 // NoMerge = false. Users can disable merging by disabling optimization.
4455 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail,
4456 /*NoMerge=*/false);
4457
4458 llvm::StructType *SourceLocationTy =
4459 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
4460 llvm::StructType *CfiCheckFailDataTy =
4461 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
4462
4463 llvm::Value *V = Builder.CreateConstGEP2_32(
4464 CfiCheckFailDataTy, Builder.CreatePointerCast(Data, DefaultPtrTy), 0, 0);
4465
4466 Address CheckKindAddr(V, Int8Ty, getIntAlign());
4467 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
4468
4469 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
4470 CGM.getLLVMContext(),
4471 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
4472 llvm::Value *ValidVtable = Builder.CreateZExt(
4473 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
4474 {Addr, AllVtables}),
4475 IntPtrTy);
4476
4477 const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
4478 {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
4479 {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
4480 {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
4481 {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
4482 {CFITCK_ICall, SanitizerKind::SO_CFIICall}};
4483
4484 for (auto CheckKindOrdinalPair : CheckKinds) {
4485 int Kind = CheckKindOrdinalPair.first;
4486 SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
4487
4488 // TODO: we could apply SanitizerAnnotateDebugInfo(Ordinal) instead of
4489 // relying on the SanitizerScope with all CFI ordinals
4490
4491 llvm::Value *Cond =
4492 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
4493 if (CGM.getLangOpts().Sanitize.has(Ordinal))
4494 EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
4495 {}, {Data, Addr, ValidVtable});
4496 else
4497 // TODO: we can't rely on CGM.getCodeGenOpts().SanitizeMergeHandlers.
4498 // Although the compiler allows SanitizeMergeHandlers to be set
4499 // independently of CGM.getLangOpts().Sanitize, Driver/SanitizerArgs.cpp
4500 // requires that SanitizeMergeHandlers is a subset of Sanitize.
4501 EmitTrapCheck(Cond, CheckHandler, /*NoMerge=*/false);
4502 }
4503
4505 // The only reference to this function will be created during LTO link.
4506 // Make sure it survives until then.
4507 CGM.addUsedGlobal(F);
4508}
4509
4511 if (SanOpts.has(SanitizerKind::Unreachable)) {
4512 auto CheckOrdinal = SanitizerKind::SO_Unreachable;
4513 auto CheckHandler = SanitizerHandler::BuiltinUnreachable;
4514 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4515 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
4516 CheckOrdinal),
4517 CheckHandler, EmitCheckSourceLocation(Loc), {});
4518 }
4519 Builder.CreateUnreachable();
4520}
4521
4522void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
4523 SanitizerHandler CheckHandlerID,
4524 bool NoMerge, const TrapReason *TR) {
4525 llvm::BasicBlock *Cont = createBasicBlock("cont");
4526
4527 // If we're optimizing, collapse all calls to trap down to just one per
4528 // check-type per function to save on code size.
4529 if ((int)TrapBBs.size() <= CheckHandlerID)
4530 TrapBBs.resize(CheckHandlerID + 1);
4531
4532 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
4533
4534 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
4535 llvm::StringRef TrapMessage;
4536 llvm::StringRef TrapCategory;
4537 auto DebugTrapReasonKind = CGM.getCodeGenOpts().getSanitizeDebugTrapReasons();
4538 if (TR && !TR->isEmpty() &&
4539 DebugTrapReasonKind ==
4541 TrapMessage = TR->getMessage();
4542 TrapCategory = TR->getCategory();
4543 } else {
4544 TrapMessage = GetUBSanTrapForHandler(CheckHandlerID);
4545 TrapCategory = "Undefined Behavior Sanitizer";
4546 }
4547
4548 if (getDebugInfo() && !TrapMessage.empty() &&
4549 DebugTrapReasonKind !=
4551 TrapLocation) {
4552 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
4553 TrapLocation, TrapCategory, TrapMessage);
4554 }
4555
4556 NoMerge = NoMerge || !CGM.getCodeGenOpts().isOptimizedBuild() ||
4557 (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
4558
4559 llvm::MDBuilder MDHelper(getLLVMContext());
4560 if (TrapBB && !NoMerge) {
4561 auto Call = TrapBB->begin();
4562 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
4563
4564 Call->applyMergedLocation(Call->getDebugLoc(), TrapLocation);
4565
4566 Builder.CreateCondBr(Checked, Cont, TrapBB,
4567 MDHelper.createLikelyBranchWeights());
4568 } else {
4569 TrapBB = createBasicBlock("trap");
4570 Builder.CreateCondBr(Checked, Cont, TrapBB,
4571 MDHelper.createLikelyBranchWeights());
4572 EmitBlock(TrapBB);
4573
4574 ApplyDebugLocation applyTrapDI(*this, TrapLocation);
4575
4576 llvm::CallInst *TrapCall;
4577 if (CGM.getCodeGenOpts().SanitizeTrapLoop)
4578 TrapCall =
4579 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::looptrap));
4580 else
4581 TrapCall = Builder.CreateCall(
4582 CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
4583 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
4584
4585 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4586 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4587 CGM.getCodeGenOpts().TrapFuncName);
4588 TrapCall->addFnAttr(A);
4589 }
4590 if (NoMerge)
4591 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4592 TrapCall->setDoesNotReturn();
4593 TrapCall->setDoesNotThrow();
4594 Builder.CreateUnreachable();
4595 }
4596
4597 EmitBlock(Cont);
4598}
4599
4600llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
4601 llvm::CallInst *TrapCall =
4602 Builder.CreateCall(CGM.getIntrinsic(IntrID));
4603
4604 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4605 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4606 CGM.getCodeGenOpts().TrapFuncName);
4607 TrapCall->addFnAttr(A);
4608 }
4609
4611 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4612 return TrapCall;
4613}
4614
4616 LValueBaseInfo *BaseInfo,
4617 TBAAAccessInfo *TBAAInfo) {
4618 assert(E->getType()->isArrayType() &&
4619 "Array to pointer decay must have array source type!");
4620
4621 // Expressions of array type can't be bitfields or vector elements.
4622 LValue LV = EmitLValue(E);
4623 Address Addr = LV.getAddress();
4624
4625 // If the array type was an incomplete type, we need to make sure
4626 // the decay ends up being the right type.
4627 llvm::Type *NewTy = ConvertType(E->getType());
4628 Addr = Addr.withElementType(NewTy);
4629
4630 // Note that VLA pointers are always decayed, so we don't need to do
4631 // anything here.
4632 if (!E->getType()->isVariableArrayType()) {
4633 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4634 "Expected pointer to array");
4635
4636 if (getLangOpts().EmitLogicalPointer) {
4637 // Array-to-pointer decay for an SGEP is a no-op as we don't do any
4638 // logical indexing. See #179951 for some additional context.
4639 auto *SGEP =
4640 Builder.CreateStructuredGEP(NewTy, Addr.emitRawPointer(*this), {});
4641 Addr = Address(SGEP, NewTy, Addr.getAlignment(), Addr.isKnownNonNull());
4642 } else {
4643 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4644 }
4645 }
4646
4647 // The result of this decay conversion points to an array element within the
4648 // base lvalue. However, since TBAA currently does not support representing
4649 // accesses to elements of member arrays, we conservatively represent accesses
4650 // to the pointee object as if it had no any base lvalue specified.
4651 // TODO: Support TBAA for member arrays.
4653 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
4654 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
4655
4656 return Addr.withElementType(ConvertTypeForMem(EltType));
4657}
4658
4659/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4660/// array to pointer, return the array subexpression.
4661static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4662 // If this isn't just an array->pointer decay, bail out.
4663 const auto *CE = dyn_cast<CastExpr>(E);
4664 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4665 return nullptr;
4666
4667 // If this is a decay from variable width array, bail out.
4668 const Expr *SubExpr = CE->getSubExpr();
4669 if (SubExpr->getType()->isVariableArrayType())
4670 return nullptr;
4671
4672 return SubExpr;
4673}
4674
4676 llvm::Type *elemType,
4677 llvm::Value *ptr,
4678 ArrayRef<llvm::Value*> indices,
4679 bool inbounds,
4680 bool signedIndices,
4681 SourceLocation loc,
4682 const llvm::Twine &name = "arrayidx") {
4683 if (inbounds && CGF.getLangOpts().EmitLogicalPointer)
4684 return CGF.Builder.CreateStructuredGEP(elemType, ptr, indices);
4685
4686 if (inbounds) {
4687 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
4689 name);
4690 } else {
4691 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
4692 }
4693}
4694
4697 llvm::Type *arrayType,
4698 llvm::Type *elementType, bool inbounds,
4699 bool signedIndices, SourceLocation loc,
4700 CharUnits align,
4701 const llvm::Twine &name = "arrayidx") {
4702 if (inbounds && CGF.getLangOpts().EmitLogicalPointer)
4703 return RawAddress(CGF.Builder.CreateStructuredGEP(arrayType,
4704 addr.emitRawPointer(CGF),
4705 indices.drop_front()),
4706 elementType, align);
4707
4708 if (inbounds) {
4709 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
4711 align, name);
4712 } else {
4713 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
4714 }
4715}
4716
4718 const VariableArrayType *vla) {
4719 QualType eltType;
4720 do {
4721 eltType = vla->getElementType();
4722 } while ((vla = ctx.getAsVariableArrayType(eltType)));
4723 return eltType;
4724}
4725
4727 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4728}
4729
4730static bool hasBPFPreserveStaticOffset(const Expr *E) {
4731 if (!E)
4732 return false;
4733 QualType PointeeType = E->getType()->getPointeeType();
4734 if (PointeeType.isNull())
4735 return false;
4736 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4737 return hasBPFPreserveStaticOffset(BaseDecl);
4738 return false;
4739}
4740
4741// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
4743 Address &Addr) {
4744 if (!CGF.getTarget().getTriple().isBPF())
4745 return Addr;
4746
4747 llvm::Function *Fn =
4748 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4749 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4750 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4751}
4752
4753/// Given an array base, check whether its member access belongs to a record
4754/// with preserve_access_index attribute or not.
4755static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4756 if (!ArrayBase || !CGF.getDebugInfo())
4757 return false;
4758
4759 // Only support base as either a MemberExpr or DeclRefExpr.
4760 // DeclRefExpr to cover cases like:
4761 // struct s { int a; int b[10]; };
4762 // struct s *p;
4763 // p[1].a
4764 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4765 // p->b[5] is a MemberExpr example.
4766 const Expr *E = ArrayBase->IgnoreImpCasts();
4767 if (const auto *ME = dyn_cast<MemberExpr>(E))
4768 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4769
4770 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4771 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4772 if (!VarDef)
4773 return false;
4774
4775 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4776 if (!PtrT)
4777 return false;
4778
4779 const auto *PointeeT = PtrT->getPointeeType()
4781 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4782 return RecT->getDecl()
4783 ->getMostRecentDecl()
4784 ->hasAttr<BPFPreserveAccessIndexAttr>();
4785 return false;
4786 }
4787
4788 return false;
4789}
4790
4793 QualType eltType, bool inbounds,
4794 bool signedIndices, SourceLocation loc,
4795 QualType *arrayType = nullptr,
4796 const Expr *Base = nullptr,
4797 const llvm::Twine &name = "arrayidx") {
4798 // All the indices except that last must be zero.
4799#ifndef NDEBUG
4800 for (auto *idx : indices.drop_back())
4801 assert(isa<llvm::ConstantInt>(idx) &&
4802 cast<llvm::ConstantInt>(idx)->isZero());
4803#endif
4804
4805 // Determine the element size of the statically-sized base. This is
4806 // the thing that the indices are expressed in terms of.
4807 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4808 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4809 }
4810
4811 // We can use that to compute the best alignment of the element.
4812 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4813 CharUnits eltAlign =
4814 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4815
4817 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4818
4819 llvm::Value *eltPtr;
4820 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4821 if (!LastIndex ||
4823 addr = emitArraySubscriptGEP(CGF, addr, indices,
4825 : nullptr,
4826 CGF.ConvertTypeForMem(eltType), inbounds,
4827 signedIndices, loc, eltAlign, name);
4828 return addr;
4829 } else {
4830 // Remember the original array subscript for bpf target
4831 unsigned idx = LastIndex->getZExtValue();
4832 llvm::DIType *DbgInfo = nullptr;
4833 if (arrayType)
4834 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4835 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4836 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4837 idx, DbgInfo);
4838 }
4839
4840 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4841}
4842
4843namespace {
4844
4845/// StructFieldAccess is a simple visitor class to grab the first l-value to
4846/// r-value cast Expr.
4847struct StructFieldAccess
4848 : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
4849 const Expr *VisitCastExpr(const CastExpr *E) {
4850 if (E->getCastKind() == CK_LValueToRValue)
4851 return E;
4852 return Visit(E->getSubExpr());
4853 }
4854 const Expr *VisitParenExpr(const ParenExpr *E) {
4855 return Visit(E->getSubExpr());
4856 }
4857};
4858
4859} // end anonymous namespace
4860
4861/// The offset of a field from the beginning of the record.
4863 const FieldDecl *Field, int64_t &Offset) {
4864 ASTContext &Ctx = CGF.getContext();
4865 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4866 unsigned FieldNo = 0;
4867
4868 for (const FieldDecl *FD : RD->fields()) {
4869 if (FD == Field) {
4870 Offset += Layout.getFieldOffset(FieldNo);
4871 return true;
4872 }
4873
4874 QualType Ty = FD->getType();
4875 if (Ty->isRecordType())
4876 if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
4877 Offset += Layout.getFieldOffset(FieldNo);
4878 return true;
4879 }
4880
4881 if (!RD->isUnion())
4882 ++FieldNo;
4883 }
4884
4885 return false;
4886}
4887
4888/// Returns the relative offset difference between \p FD1 and \p FD2.
4889/// \code
4890/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4891/// \endcode
4892/// Both fields must be within the same struct.
4893static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4894 const FieldDecl *FD1,
4895 const FieldDecl *FD2) {
4896 const RecordDecl *FD1OuterRec =
4898 const RecordDecl *FD2OuterRec =
4900
4901 if (FD1OuterRec != FD2OuterRec)
4902 // Fields must be within the same RecordDecl.
4903 return std::optional<int64_t>();
4904
4905 int64_t FD1Offset = 0;
4906 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4907 return std::optional<int64_t>();
4908
4909 int64_t FD2Offset = 0;
4910 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4911 return std::optional<int64_t>();
4912
4913 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4914}
4915
4916/// EmitCountedByBoundsChecking - If the array being accessed has a "counted_by"
4917/// attribute, generate bounds checking code. The "count" field is at the top
4918/// level of the struct or in an anonymous struct, that's also at the top level.
4919/// Future expansions may allow the "count" to reside at any place in the
4920/// struct, but the value of "counted_by" will be a "simple" path to the count,
4921/// i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4922/// similar to emit the correct GEP.
4924 const Expr *ArrayExpr, QualType ArrayType, Address ArrayInst,
4925 QualType IndexType, llvm::Value *IndexVal, bool Accessed,
4926 bool FlexibleArray) {
4927 const auto *ME = dyn_cast<MemberExpr>(ArrayExpr->IgnoreImpCasts());
4928 if (!ME || !ME->getMemberDecl()->getType()->isCountAttributedType())
4929 return;
4930
4931 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4932 getLangOpts().getStrictFlexArraysLevel();
4933 if (FlexibleArray &&
4934 !ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel))
4935 return;
4936
4937 const FieldDecl *FD = cast<FieldDecl>(ME->getMemberDecl());
4938 const FieldDecl *CountFD = FD->findCountedByField();
4939 if (!CountFD)
4940 return;
4941
4942 if (std::optional<int64_t> Diff =
4943 getOffsetDifferenceInBits(*this, CountFD, FD)) {
4944 if (!ArrayInst.isValid()) {
4945 // An invalid Address indicates we're checking a pointer array access.
4946 // Emit the checked L-Value here.
4947 LValue LV = EmitCheckedLValue(ArrayExpr, TCK_MemberAccess);
4948 ArrayInst = LV.getAddress();
4949 }
4950
4951 // FIXME: The 'static_cast' is necessary, otherwise the result turns into a
4952 // uint64_t, which messes things up if we have a negative offset difference.
4953 Diff = *Diff / static_cast<int64_t>(CGM.getContext().getCharWidth());
4954
4955 // Create a GEP with the byte offset between the counted object and the
4956 // count and use that to load the count value.
4957 ArrayInst = Builder.CreatePointerBitCastOrAddrSpaceCast(ArrayInst,
4958 Int8PtrTy, Int8Ty);
4959
4960 llvm::Type *BoundsType = ConvertType(CountFD->getType());
4961 llvm::Value *BoundsVal =
4962 Builder.CreateInBoundsGEP(Int8Ty, ArrayInst.emitRawPointer(*this),
4963 Builder.getInt32(*Diff), ".counted_by.gep");
4964 BoundsVal = Builder.CreateAlignedLoad(BoundsType, BoundsVal, getIntAlign(),
4965 ".counted_by.load");
4966
4967 // Now emit the bounds checking.
4968 EmitBoundsCheckImpl(ArrayExpr, ArrayType, IndexVal, IndexType, BoundsVal,
4969 CountFD->getType(), Accessed);
4970 }
4971}
4972
                                               bool Accessed) {
  // NOTE(review): the function header and several hyperlinked lines in this
  // body were dropped by the listing extraction; the truncation points are
  // flagged below. Consult upstream clang CGExpr.cpp when restoring them.
  // The index must always be an integer, which is not an aggregate. Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  llvm::Value *IdxPre =
      (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
  bool SignedIndices = false;
  // Emits (and bounds-checks) the index; deferred until after the base so
  // evaluation order matches the source where required.
  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
    auto *Idx = IdxPre;
    if (E->getLHS() != E->getIdx()) {
      assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
      Idx = EmitScalarExpr(E->getIdx());
    }

    QualType IdxTy = E->getIdx()->getType();
    bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
    SignedIndices |= IdxSigned;

    if (SanOpts.has(SanitizerKind::ArrayBounds))
      EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);

    // Extend or truncate the index type to 32 or 64-bits.
    if (Promote && Idx->getType() != IntPtrTy)
      Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");

    return Idx;
  };
  IdxPre = nullptr;

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isSubscriptableVectorType() &&
    // NOTE(review): the second operand of this condition was lost in the
    // listing.
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/false);
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
                                 LHS.getBaseInfo(), TBAAAccessInfo());
  }

  // The HLSL runtime handles subscript expressions on global resource arrays
  // and objects with HLSL buffer layouts.
  if (getLangOpts().HLSL) {
    std::optional<LValue> LV;
    if (E->getType()->isHLSLResourceRecord() ||
      // NOTE(review): the second operand of this condition was lost in the
      // listing.
      LV = CGM.getHLSLRuntime().emitResourceArraySubscriptExpr(E, *this);
    } else if (E->getType().getAddressSpace() == LangAS::hlsl_constant) {
      LV = CGM.getHLSLRuntime().emitBufferArraySubscriptExpr(E, *this,
                                                             EmitIdxAfterBase);
    }
    if (LV.has_value())
      return *LV;
  }

  // All the other cases basically behave like simple offsetting.

  // Handle the extvector case we ignored above.
  // NOTE(review): the enclosing 'if' line and the Addr initialization were
  // lost in the listing.
    LValue LV = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
                                 SignedIndices, E->getExprLoc());
    return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, EltType));
  }

  LValueBaseInfo EltBaseInfo;
  TBAAAccessInfo EltTBAAInfo;
  // NOTE(review): the declaration of the result Address was lost in the
  // listing here.
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().PointerOverflowDefined) {
      Idx = Builder.CreateMul(Idx, numElements);
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
    }

    Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
                                 !getLangOpts().PointerOverflowDefined,
                                 SignedIndices, E->getExprLoc());

  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
    // Indexing over an interface, as in "NSString *P; P[4];"

    // Emit the base pointer.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
    llvm::Value *InterfaceSizeVal =
        llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());

    llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);

    // We don't necessarily build correct LLVM struct types for ObjC
    // interfaces, so we can't rely on GEP to do this scaling
    // correctly, so we need to cast to i8*.  FIXME: is this actually
    // true?  A lot of other things in the fragile ABI would break...
    llvm::Type *OrigBaseElemTy = Addr.getElementType();

    // Do the GEP.
    CharUnits EltAlign =
        getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
    llvm::Value *EltPtr =
        emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
                              ScaledIdx, false, SignedIndices, E->getExprLoc());
    Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    if (SanOpts.has(SanitizerKind::ArrayBounds))
      EmitCountedByBoundsChecking(Array, Array->getType(), ArrayLV.getAddress(),
                                  E->getIdx()->getType(), Idx, Accessed,
                                  /*FlexibleArray=*/true);

    // Propagate the alignment from the array itself to the result.
    QualType arrayType = Array->getType();
    // NOTE(review): the 'Addr = emitArraySubscriptGEP(' call head was lost in
    // the listing here.
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        E->getType(), !getLangOpts().PointerOverflowDefined, SignedIndices,
        E->getExprLoc(), &arrayType, E->getBase());
    EltBaseInfo = ArrayLV.getBaseInfo();
    if (!CGM.getCodeGenOpts().NewStructPathTBAA) {
      // Since CodeGenTBAA::getTypeInfoHelper only handles array types for
      // new struct path TBAA, we must a use a plain access.
      EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
    } else if (ArrayLV.getTBAAInfo().isMayAlias()) {
      EltTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
    } else if (ArrayLV.getTBAAInfo().isIncomplete()) {
      // The array element is complete, even if the array is not.
      EltTBAAInfo = CGM.getTBAAAccessInfo(E->getType());
    } else {
      // The TBAA access info from the array (base) lvalue is ordinary. We will
      // adapt it to create access info for the element.
      EltTBAAInfo = ArrayLV.getTBAAInfo();

      // We retain the TBAA struct path (BaseType and Offset members) from the
      // array. In the TBAA representation, we map any array access to the
      // element at index 0, as the index is generally a runtime value. This
      // element has the same offset in the base type as the array itself.
      // If the array lvalue had no base type, there is no point trying to
      // generate one, since an array itself is not a valid base type.

      // We also retain the access type from the base lvalue, but the access
      // size must be updated to the size of an individual element.
      // NOTE(review): the right-hand side of this assignment was lost in the
      // listing.
      EltTBAAInfo.Size =
    }
  } else {
    // The base must be a pointer; emit it with an estimate of its alignment.
    Address BaseAddr =
        EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    QualType ptrType = E->getBase()->getType();
    Addr = emitArraySubscriptGEP(*this, BaseAddr, Idx, E->getType(),
                                 !getLangOpts().PointerOverflowDefined,
                                 SignedIndices, E->getExprLoc(), &ptrType,
                                 E->getBase());

    if (SanOpts.has(SanitizerKind::ArrayBounds)) {
      // Look through the pointer load to find a possible counted_by member.
      StructFieldAccess Visitor;
      const Expr *Base = Visitor.Visit(E->getBase());

      if (const auto *CE = dyn_cast_if_present<CastExpr>(Base);
          CE && CE->getCastKind() == CK_LValueToRValue)
        // NOTE(review): the EmitCountedByBoundsChecking call head was lost in
        // the listing here.
            E->getIdx()->getType(), Idx, Accessed,
            /*FlexibleArray=*/false);
    }
  }

  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);

  if (getLangOpts().ObjC &&
      getLangOpts().getGC() != LangOptions::NonGC) {
    // NOTE(review): the ObjC GC lvalue-class handling lines were lost in the
    // listing here.
  }
  return LV;
}
5184
5186 llvm::Value *Idx = EmitScalarExpr(E);
5187 if (Idx->getType() == IntPtrTy)
5188 return Idx;
5189 bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
5190 return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
5191}
5192
                                             const MatrixSingleSubscriptExpr *E) {
  // NOTE(review): the function header line and part of the HLSL condition
  // below were dropped by the listing extraction.
  // Emit the matrix base as an l-value, then index it by row.
  LValue Base = EmitLValue(E->getBase());
  llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());

  RawAddress MatAddr = Base.getAddress();
  if (getLangOpts().HLSL &&
    // NOTE(review): the second operand of this condition was lost in the
    // listing.
    // HLSL buffer-resident matrices are first copied into a temporary.
    MatAddr = CGM.getHLSLRuntime().createBufferMatrixTempAddress(
        Base, E->getExprLoc(), *this);

  return LValue::MakeMatrixRow(MaybeConvertMatrixAddress(MatAddr, *this),
                               RowIdx, E->getBase()->getType(),
                               Base.getBaseInfo(), TBAAAccessInfo());
}
5208
  assert(
      !E->isIncomplete() &&
      "incomplete matrix subscript expressions should be rejected during Sema");
  // NOTE(review): the function header line was dropped by the listing
  // extraction.
  LValue Base = EmitLValue(E->getBase());

  // Extend or truncate the index type to 32 or 64-bits if needed.
  llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
  llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());
  llvm::MatrixBuilder MB(Builder);
  const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
  unsigned NumCols = MatrixTy->getNumColumns();
  unsigned NumRows = MatrixTy->getNumRows();
  // Flatten (row, col) into a single element index honoring the configured
  // matrix memory layout.
  bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
  // NOTE(review): the right-hand enum operand of this comparison was lost in
  // the listing.
  llvm::Value *FinalIdx =
      MB.CreateIndex(RowIdx, ColIdx, NumRows, NumCols, IsMatrixRowMajor);

  return LValue::MakeMatrixElt(
      MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
      E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}
5231
5233 LValueBaseInfo &BaseInfo,
5234 TBAAAccessInfo &TBAAInfo,
5235 QualType BaseTy, QualType ElTy,
5236 bool IsLowerBound) {
5237 LValue BaseLVal;
5238 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
5239 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
5240 if (BaseTy->isArrayType()) {
5241 Address Addr = BaseLVal.getAddress();
5242 BaseInfo = BaseLVal.getBaseInfo();
5243
5244 // If the array type was an incomplete type, we need to make sure
5245 // the decay ends up being the right type.
5246 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
5247 Addr = Addr.withElementType(NewTy);
5248
5249 // Note that VLA pointers are always decayed, so we don't need to do
5250 // anything here.
5251 if (!BaseTy->isVariableArrayType()) {
5252 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
5253 "Expected pointer to array");
5254 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
5255 }
5256
5257 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
5258 }
5259 LValueBaseInfo TypeBaseInfo;
5260 TBAAAccessInfo TypeTBAAInfo;
5261 CharUnits Align =
5262 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
5263 BaseInfo.mergeForCast(TypeBaseInfo);
5264 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
5265 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
5266 CGF.ConvertTypeForMem(ElTy), Align);
5267 }
5268 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
5269}
5270
                                              bool IsLowerBound) {
  // NOTE(review): the function header line and the BaseTy declaration were
  // dropped by the listing extraction; truncation points are flagged below.

  assert(!E->isOpenACCArraySection() &&
         "OpenACC Array section codegen not implemented");

  // NOTE(review): the BaseTy initialization was lost in the listing here.
  QualType ResultExprTy;
  if (auto *AT = getContext().getAsArrayType(BaseTy))
    ResultExprTy = AT->getElementType();
  else
    ResultExprTy = BaseTy->getPointeeType();
  llvm::Value *Idx = nullptr;
  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
    // Requesting lower bound or upper bound, but without provided length and
    // without ':' symbol for the default length -> length = 1.
    // Idx = LowerBound ?: 0;
    if (auto *LowerBound = E->getLowerBound()) {
      Idx = Builder.CreateIntCast(
          EmitScalarExpr(LowerBound), IntPtrTy,
          LowerBound->getType()->hasSignedIntegerRepresentation());
    } else
      Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
  } else {
    // Try to emit length or lower bound as constant. If this is possible, 1
    // is subtracted from constant length or lower bound. Otherwise, emit LLVM
    // IR (LB + Len) - 1.
    auto &C = CGM.getContext();
    auto *Length = E->getLength();
    llvm::APSInt ConstLength;
    if (Length) {
      // Idx = LowerBound + Length - 1;
      if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
        ConstLength = CL->zextOrTrunc(PointerWidthInBits);
        Length = nullptr;
      }
      auto *LowerBound = E->getLowerBound();
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
      if (LowerBound) {
        if (std::optional<llvm::APSInt> LB =
                LowerBound->getIntegerConstantExpr(C)) {
          ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
          LowerBound = nullptr;
        }
      }
      // Fold the "- 1" into whichever operand is a known constant.
      if (!Length)
        --ConstLength;
      else if (!LowerBound)
        --ConstLowerBound;

      if (Length || LowerBound) {
        auto *LowerBoundVal =
            LowerBound
                ? Builder.CreateIntCast(
                      EmitScalarExpr(LowerBound), IntPtrTy,
                      LowerBound->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
        auto *LengthVal =
            Length
                ? Builder.CreateIntCast(
                      EmitScalarExpr(Length), IntPtrTy,
                      Length->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
                                /*HasNUW=*/false,
                                !getLangOpts().PointerOverflowDefined);
        // If both parts were runtime values, the "- 1" still needs emitting.
        if (Length && LowerBound) {
          Idx = Builder.CreateSub(
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
              /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
        }
      } else
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
    } else {
      // Idx = ArraySize - 1;
      QualType ArrayTy = BaseTy->isPointerType()
      // NOTE(review): the true-branch of this conditional was lost in the
      // listing.
                             : BaseTy;
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
        Length = VAT->getSizeExpr();
        if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
          ConstLength = *L;
          Length = nullptr;
        }
      } else {
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
        assert(CAT && "unexpected type for array initializer");
        ConstLength = CAT->getSize();
      }
      if (Length) {
        auto *LengthVal = Builder.CreateIntCast(
            EmitScalarExpr(Length), IntPtrTy,
            Length->getType()->hasSignedIntegerRepresentation());
        Idx = Builder.CreateSub(
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
            /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
      } else {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        --ConstLength;
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
      }
    }
  }
  assert(Idx);

  Address EltPtr = Address::invalid();
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
                                BaseTy, VLA->getElementType(), IsLowerBound);
    // The element count here is the total number of non-VLA elements.
    llvm::Value *NumElements = getVLASize(VLA).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().PointerOverflowDefined)
      Idx = Builder.CreateMul(Idx, NumElements);
    else
      Idx = Builder.CreateNSWMul(Idx, NumElements);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
                                   !getLangOpts().PointerOverflowDefined,
                                   /*signedIndices=*/false, E->getExprLoc());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);

    // Propagate the alignment from the array itself to the result.
    EltPtr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        ResultExprTy, !getLangOpts().PointerOverflowDefined,
        /*signedIndices=*/false, E->getExprLoc());
    BaseInfo = ArrayLV.getBaseInfo();
    TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
  } else {
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
                                ResultExprTy, IsLowerBound);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
                                   !getLangOpts().PointerOverflowDefined,
                                   /*signedIndices=*/false, E->getExprLoc());
  }

  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
}
5433
// NOTE(review): this listing elided the defining signature (orig. 5434-5435);
// from the body this is presumably
// LValue CodeGenFunction::EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
// — TODO confirm against the full source. Emits an lvalue for an ext-vector
// element access (e.g. v.xyzw), handling pointer bases, glvalue bases,
// rvalue bases, matrix-row bases, and nested ext-vector-element bases.
5436 // Emit the base vector as an l-value.
5437 LValue Base;
5438
5439 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
5440 if (E->isArrow()) {
5441 // If it is a pointer to a vector, emit the address and form an lvalue with
5442 // it.
5443 LValueBaseInfo BaseInfo;
5444 TBAAAccessInfo TBAAInfo;
5445 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
5446 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
5447 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
5448 Base.getQuals().removeObjCGCAttr();
5449 } else if (E->getBase()->isGLValue()) {
5450 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
5451 // emit the base as an lvalue.
5452 assert(E->getBase()->getType()->isVectorType());
5453 Base = EmitLValue(E->getBase());
5454 } else {
5455 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
5456 assert(E->getBase()->getType()->isVectorType() &&
5457 "Result must be a vector");
5458 llvm::Value *Vec = EmitScalarExpr(E->getBase());
5459
5460 // Store the vector to memory (because LValue wants an address).
5461 Address VecMem = CreateMemTemp(E->getBase()->getType());
5462 // need to zero extend an hlsl boolean vector to store it back to memory
5463 QualType Ty = E->getBase()->getType();
5464 llvm::Type *LTy = convertTypeForLoadStore(Ty, Vec->getType());
5465 if (LTy->getScalarSizeInBits() > Vec->getType()->getScalarSizeInBits())
5466 Vec = Builder.CreateZExt(Vec, LTy);
5467 Builder.CreateStore(Vec, VecMem);
// NOTE(review): orig. line 5468 is elided by this listing — presumably it
// forms `Base` from VecMem (MakeAddrLValue) so the branches below see a
// valid lvalue; verify against the full source.
5469 }
5470
5471 QualType type =
5472 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
5473
5474 // Encode the element access list into a vector of unsigned indices.
// NOTE(review): orig. line 5475 (the declaration of `Indices`, likely a
// SmallVector<uint32_t, 4>) is elided by this listing.
5476 E->getEncodedElementAccess(Indices);
5477
5478 if (Base.isSimple()) {
// Simple memory lvalue: build a constant shuffle-index vector and wrap
// the base address as an ext-vector-element lvalue.
5479 llvm::Constant *CV =
5480 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
5481 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
5482 Base.getBaseInfo(), TBAAAccessInfo());
5483 }
5484
5485 if (Base.isMatrixRow()) {
5486 if (auto *RowIdx =
5487 llvm::dyn_cast<llvm::ConstantInt>(Base.getMatrixRowIdx())) {
// NOTE(review): orig. line 5488 (declaration of `MatIndices`) is elided here.
5489 QualType MatTy = Base.getType();
5490 const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();
5491 unsigned NumCols = Indices.size();
5492 unsigned NumRows = MT->getNumRows();
5493 unsigned Row = RowIdx->getZExtValue();
5494 QualType VecQT = E->getBase()->getType();
5495 if (NumCols != MT->getNumColumns()) {
// Swizzle selects a subset of the row: narrow the vector type to match.
5496 const auto *EVT = VecQT->getAs<ExtVectorType>();
5497 QualType ElemQT = EVT->getElementType();
5498 VecQT = getContext().getExtVectorType(ElemQT, NumCols);
5499 }
5500 for (unsigned C = 0; C < NumCols; ++C) {
// Matrices are stored column-major: element (Row, Col) linearizes to
// Col * NumRows + Row.
5501 unsigned Col = Indices[C];
5502 unsigned Linear = Col * NumRows + Row;
5503 MatIndices.push_back(llvm::ConstantInt::get(Int32Ty, Linear));
5504 }
5505
5506 llvm::Constant *ConstIdxs = llvm::ConstantVector::get(MatIndices);
5507 return LValue::MakeExtVectorElt(Base.getMatrixAddress(), ConstIdxs, VecQT,
5508 Base.getBaseInfo(), TBAAAccessInfo());
5509 }
5510 llvm::Constant *Cols =
5511 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
5512 // Note: intentionally not using E.getType() so we can reuse isMatrixRow()
5513 // implementations in EmitLoadOfLValue & EmitStoreThroughLValue and don't
5514 // need the LValue to have its own number of rows and columns when the
5515 // type is a vector.
// NOTE(review): orig. line 5516 (the `return LValue::Make...(` opener for
// this call) is elided by this listing.
5517 Base.getMatrixAddress(), Base.getMatrixRowIdx(), Cols, Base.getType(),
5518 Base.getBaseInfo(), TBAAAccessInfo());
5519 }
5520
5521 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
5522
// Nested swizzle (v.xy.x): compose the new indices through the base's
// existing element selection.
5523 llvm::Constant *BaseElts = Base.getExtVectorElts();
// NOTE(review): orig. line 5524 (declaration of `CElts`) is elided here.
5526 for (unsigned Index : Indices)
5527 CElts.push_back(BaseElts->getAggregateElement(Index));
5528 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
5529 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
5530 Base.getBaseInfo(), TBAAAccessInfo());
5531}
5532
// NOTE(review): the defining signature (orig. 5533) is elided by this listing.
// Body: strips parens and walks down through chained MemberExprs to the
// outermost base expression, then asks the ASTContext whether that base is a
// sentinel null expression (used by callers to relax inbounds GEP emission
// for null-base offsetof-style idioms).
5534 const Expr *UnderlyingBaseExpr = E->IgnoreParens();
5535 while (auto *BaseMemberExpr = dyn_cast<MemberExpr>(UnderlyingBaseExpr))
5536 UnderlyingBaseExpr = BaseMemberExpr->getBase()->IgnoreParens();
5537 return getContext().isSentinelNullExpr(UnderlyingBaseExpr);
5538}
5539
// NOTE(review): the defining signature (orig. 5540) is elided by this
// listing; from the body this is presumably
// LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) — TODO confirm.
// Emits an lvalue for a member access: tries the DeclRefExpr shortcut,
// HLSL buffer/resource members, then the general field/function-member paths.
5541 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
// NOTE(review): orig. 5542 is elided (likely bookkeeping before the return).
5543 return EmitDeclRefLValue(DRE);
5544 }
5545
5546 if (getLangOpts().HLSL) {
5547 QualType QT = E->getType();
// NOTE(review): orig. 5548 and 5551 (the conditions guarding these HLSL
// paths) are elided by this listing.
5549 return CGM.getHLSLRuntime().emitBufferMemberExpr(*this, E);
5550
5552 std::optional<LValue> LV;
5553 LV = CGM.getHLSLRuntime().emitResourceMemberExpr(*this, E);
5554 if (LV.has_value())
5555 return *LV;
5556 }
5557 }
5558
5559 Expr *BaseExpr = E->getBase();
5560 // Check whether the underlying base pointer is a constant null.
5561 // If so, we do not set inbounds flag for GEP to avoid breaking some
5562 // old-style offsetof idioms.
5563 bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
// NOTE(review): orig. 5564 (the second operand of this &&, presumably the
// null-base check) is elided by this listing.
5565 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
5566 LValue BaseLV;
5567 if (E->isArrow()) {
5568 LValueBaseInfo BaseInfo;
5569 TBAAAccessInfo TBAAInfo;
5570 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
5571 QualType PtrTy = BaseExpr->getType()->getPointeeType();
5572 SanitizerSet SkippedChecks;
5573 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
// 'this' is always suitably aligned, and 'this'/DeclRefs can't be null,
// so skip the corresponding sanitizer checks.
5574 if (IsBaseCXXThis)
5575 SkippedChecks.set(SanitizerKind::Alignment, true);
5576 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
5577 SkippedChecks.set(SanitizerKind::Null, true);
// NOTE(review): orig. 5578 (the EmitTypeCheck(...) call opener) is elided.
5579 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
5580 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
5581 } else
5582 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
5583
5584 NamedDecl *ND = E->getMemberDecl();
5585 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
5586 LValue LV = EmitLValueForField(BaseLV, Field, IsInBounds);
// NOTE(review): orig. 5587 is elided here.
5588 if (getLangOpts().OpenMP) {
5589 // If the member was explicitly marked as nontemporal, mark it as
5590 // nontemporal. If the base lvalue is marked as nontemporal, mark access
5591 // to children as nontemporal too.
5592 if ((IsWrappedCXXThis(BaseExpr) &&
5593 CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
5594 BaseLV.isNontemporal())
5595 LV.setNontemporal(/*Value=*/true);
5596 }
5597 return LV;
5598 }
5599
5600 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
5601 return EmitFunctionDeclLValue(*this, E, FD);
5602
5603 llvm_unreachable("Unhandled member declaration!");
5604}
5605
5606/// Given that we are currently emitting a lambda, emit an l-value for
5607/// one of its members.
5608///
// NOTE(review): orig. 5609 (the first line of the signature, presumably
// LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,)
// is elided by this listing — confirm against the full source.
5610 llvm::Value *ThisValue) {
5611 bool HasExplicitObjectParameter = false;
5612 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
5613 if (MD) {
5614 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
5615 assert(MD->getParent()->isLambda());
5616 assert(MD->getParent() == Field->getParent());
5617 }
5618 LValue LambdaLV;
5619 if (HasExplicitObjectParameter) {
// C++23 deducing-this lambda: the lambda object is the first (explicit)
// parameter rather than an implicit 'this'.
5620 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
5621 auto It = LocalDeclMap.find(D);
5622 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
5623 Address AddrOfExplicitObject = It->getSecond();
5624 if (D->getType()->isReferenceType())
5625 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
// NOTE(review): orig. 5626 and 5629 (trailing arguments of these two calls)
// are elided by this listing.
5627 else
5628 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
5630
5631 // Make sure we have an lvalue to the lambda itself and not a derived class.
5632 auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
5633 auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
5634 if (ThisTy != LambdaTy) {
// Explicit object parameter may be of a class derived from the lambda:
// convert up to the lambda type along the recorded cast path.
5635 const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
// NOTE(review): orig. 5636 and 5639 (the GetAddressOfBaseClass call opener
// and the declaration of T) are elided by this listing.
5637 LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
5638 BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
5640 LambdaLV = MakeAddrLValue(Base, T);
5641 }
5642 } else {
// Classic lambda: 'this' is the lambda closure object itself.
5643 CanQualType LambdaTagType =
5644 getContext().getCanonicalTagType(Field->getParent());
5645 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
5646 }
5647 return EmitLValueForField(LambdaLV, Field);
5648}
5649
// NOTE(review): the signature (orig. 5650) is elided by this listing;
// this is the convenience overload that forwards to the two-argument
// EmitLValueForLambdaField using the current function's CXXABIThisValue.
5651 return EmitLValueForLambdaField(Field, CXXABIThisValue);
5652}
5653
5654/// Get the field index in the debug info. The debug info structure/union
5655/// will ignore the unnamed bitfields.
// NOTE(review): the first signature line (orig. 5656) is elided by this
// listing; the visible parameters are (…Rec, unsigned FieldIndex).
// Returns FieldIndex minus the number of unnamed bitfields that precede it,
// since debug info does not emit entries for unnamed bitfields.
5657 unsigned FieldIndex) {
5658 unsigned I = 0, Skipped = 0;
5659
5660 for (auto *F : Rec->getDefinition()->fields()) {
5661 if (I == FieldIndex)
5662 break;
5663 if (F->isUnnamedBitField())
5664 Skipped++;
5665 I++;
5666 }
5667
5668 return FieldIndex - Skipped;
5669}
5670
5671/// Get the address of a zero-sized field within a record. The resulting
5672/// address doesn't necessarily have the right type.
// NOTE(review): the first signature line (orig. 5673) and the opening of the
// Offset computation (orig. 5676) are elided by this listing.
5674 const FieldDecl *Field,
5675 bool IsInBounds) {
5677 CGF.getContext().getFieldOffset(Field));
// Zero offset: the field aliases the start of the record; reuse Base as-is.
5678 if (Offset.isZero())
5679 return Base;
// Byte-GEP from an i8 view of the base; inbounds only when the caller
// established the base is a real in-bounds object pointer.
5680 Base = Base.withElementType(CGF.Int8Ty);
5681 if (!IsInBounds)
5682 return CGF.Builder.CreateConstByteGEP(Base, Offset);
5683 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
5684}
5685
5686/// Drill down to the storage of a field without walking into reference types,
5687/// and without respect for pointer field protection.
5688///
5689/// The resulting address doesn't necessarily have the right type.
// NOTE(review): the first signature line (orig. 5690) is elided by this
// listing; visible parameters are (…CGF, …base, const FieldDecl *field,
// bool IsInBounds).
5691 const FieldDecl *field,
5692 bool IsInBounds) {
// Empty/zero-size fields have no LLVM struct slot; compute their address
// by byte offset instead.
5693 if (isEmptyFieldForLayout(CGF.getContext(), field))
5694 return emitAddrOfZeroSizeField(CGF, base, field, IsInBounds);
5695
5696 const RecordDecl *rec = field->getParent();
5697
// Map the AST field to its slot in the lowered LLVM struct type.
5698 unsigned idx =
5699 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5700 llvm::Type *StructType =
// NOTE(review): orig. 5701 (the initializer of StructType) is elided here.
5702
5703 if (CGF.getLangOpts().EmitLogicalPointer)
5704 return RawAddress(
5705 CGF.Builder.CreateStructuredGEP(StructType, base.emitRawPointer(CGF),
5706 {CGF.Builder.getSize(idx)}),
5707 base.getElementType(), base.getAlignment());
5708
5709 if (!IsInBounds)
5710 return CGF.Builder.CreateConstGEP2_32(base, 0, idx, field->getName());
5711
5712 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
5713}
5714
5715/// Drill down to the storage of a field without walking into reference types,
5716/// wrapping the address in an llvm.protected.field.ptr intrinsic for the
5717/// pointer field protection feature if necessary.
5718///
5719/// The resulting address doesn't necessarily have the right type.
// NOTE(review): the first signature line (orig. 5720) is elided by this
// listing; visible parameters are (…base, const FieldDecl *field,
// bool IsInBounds).
5721 const FieldDecl *field, bool IsInBounds) {
5722 Address Addr = emitRawAddrOfFieldStorage(CGF, base, field, IsInBounds);
5723
// Only PFP-protected fields need the extra wrapping; everything else uses
// the raw storage address directly.
5724 if (!CGF.getContext().isPFPField(field))
5725 return Addr;
5726
5727 return CGF.EmitAddressOfPFPField(base, Addr, field);
5728}
5729
// NOTE(review): the first signature line (orig. 5730) is elided by this
// listing. Emits a debug-info-preserving struct field access
// (llvm.preserve.struct.access.index-style, used for BPF CO-RE) instead of a
// plain GEP, recording the debug-info field index.
5731 Address addr, const FieldDecl *field) {
5732 const RecordDecl *rec = field->getParent();
5733 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
5734 base.getType(), rec->getLocation());
5735
// LLVM struct slot index for the field (gep index), distinct from the
// debug-info index computed by getDebugInfoFIndex below.
5736 unsigned idx =
5737 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5738
// NOTE(review): orig. 5739 (the return/call opener, presumably
// CGF.Builder.CreatePreserveStructAccessIndex() is elided by this listing.
5740 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
5741}
5742
// Returns true if Type is (or transitively contains, via bases or fields)
// a dynamic C++ class, i.e. anything that carries a vptr. Used below to
// decide when union accesses need an invariant.group launder.
5743static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
5744 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
5745 if (!RD)
5746 return false;
5747
5748 if (RD->isDynamicClass())
5749 return true;
5750
// Recurse into base classes and then into all fields.
5751 for (const auto &Base : RD->bases())
5752 if (hasAnyVptr(Base.getType(), Context))
5753 return true;
5754
5755 for (const FieldDecl *Field : RD->fields())
5756 if (hasAnyVptr(Field->getType(), Context))
5757 return true;
5758
5759 return false;
5760}
5761
// NOTE(review): the first signature line(s) (orig. 5762) are elided by this
// listing; from the body this is presumably
// LValue CodeGenFunction::EmitLValueForField(LValue base,
//                                            const FieldDecl *field,
//                                            bool IsInBounds) — TODO confirm.
// Produces an lvalue for a struct/union/class field access: bitfields,
// TBAA bookkeeping, BPF debug-info-preserving paths, unions, references.
5763 bool IsInBounds) {
5764 LValueBaseInfo BaseInfo = base.getBaseInfo();
5765
5766 if (field->isBitField()) {
5767 const CGRecordLayout &RL =
5768 CGM.getTypes().getCGRecordLayout(field->getParent());
5769 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
// AAPCS mandates volatile bitfield accesses use the declared container
// width; UseVolatile selects that alternate storage description.
5770 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
5771 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
5772 Info.VolatileStorageSize != 0 &&
5773 field->getType()
// NOTE(review): orig. 5774-5775 (the rest of this condition and the brace)
// are elided by this listing.
5776 Address Addr = base.getAddress();
5777 unsigned Idx = RL.getLLVMFieldNo(field);
5778 const RecordDecl *rec = field->getParent();
// NOTE(review): orig. 5779-5780 are elided by this listing.
5781 if (!UseVolatile) {
5782 if (!IsInPreservedAIRegion &&
5783 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5784 if (Idx != 0) {
5785 // For structs, we GEP to the field that the record layout suggests.
5786 if (!IsInBounds)
5787 Addr = Builder.CreateConstGEP2_32(Addr, 0, Idx, field->getName());
5788 else
5789 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
5790 }
5791 } else {
// BPF CO-RE: keep the field index reconstructible at relocation time.
5792 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
5793 getContext().getCanonicalTagType(rec), rec->getLocation());
5794 Addr = Builder.CreatePreserveStructAccessIndex(
5795 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
5796 DbgInfo);
5797 }
5798 }
5799 const unsigned SS =
5800 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
5801 // Get the access type.
5802 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
5803 Addr = Addr.withElementType(FieldIntTy);
5804 if (UseVolatile) {
5805 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
5806 if (VolatileOffset)
5807 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
5808 }
5809
5810 QualType fieldType =
5811 field->getType().withCVRQualifiers(base.getVRQualifiers());
5812 // TODO: Support TBAA for bit fields.
5813 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
5814 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
5815 TBAAAccessInfo());
5816 }
5817
5818 // Fields of may-alias structures are may-alias themselves.
5819 // FIXME: this should get propagated down through anonymous structs
5820 // and unions.
5821 QualType FieldType = field->getType();
5822 const RecordDecl *rec = field->getParent();
5823 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
5824 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
5825 TBAAAccessInfo FieldTBAAInfo;
5826 if (base.getTBAAInfo().isMayAlias() ||
5827 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
5828 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5829 } else if (rec->isUnion()) {
5830 // TODO: Support TBAA for unions.
5831 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5832 } else {
5833 // If no base type been assigned for the base access, then try to generate
5834 // one for this base lvalue.
5835 FieldTBAAInfo = base.getTBAAInfo();
5836 if (!FieldTBAAInfo.BaseType) {
5837 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
5838 assert(!FieldTBAAInfo.Offset &&
5839 "Nonzero offset for an access with no base type!");
5840 }
5841
5842 // Adjust offset to be relative to the base type.
5843 const ASTRecordLayout &Layout =
// NOTE(review): orig. 5844 (the Layout initializer, presumably
// getContext().getASTRecordLayout(rec)) is elided by this listing.
5845 unsigned CharWidth = getContext().getCharWidth();
5846 if (FieldTBAAInfo.BaseType)
5847 FieldTBAAInfo.Offset +=
5848 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
5849
5850 // Update the final access type and size.
5851 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
5852 FieldTBAAInfo.Size =
// NOTE(review): orig. 5853 (the Size initializer) is elided by this listing.
5854 }
5855
5856 Address addr = base.getAddress();
// NOTE(review): orig. 5857 (the condition guarding this wrap) is elided here.
5858 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
5859 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
5860 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5861 ClassDef->isDynamicClass()) {
5862 // Getting to any field of dynamic object requires stripping dynamic
5863 // information provided by invariant.group. This is because accessing
5864 // fields may leak the real address of dynamic object, which could result
5865 // in miscompilation when leaked pointer would be compared.
5866 auto *stripped =
5867 Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
5868 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
5869 }
5870 }
5871
5872 unsigned RecordCVR = base.getVRQualifiers();
5873 if (rec->isUnion()) {
5874 // For unions, there is no pointer adjustment.
5875 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5876 hasAnyVptr(FieldType, getContext()))
5877 // Because unions can easily skip invariant.barriers, we need to add
5878 // a barrier every time CXXRecord field with vptr is referenced.
5879 addr = Builder.CreateLaunderInvariantGroup(addr);
5880
// NOTE(review): orig. 5881 (the first operand of this condition, presumably
// IsInPreservedAIRegion ||) is elided by this listing.
5882 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5883 // Remember the original union field index
5884 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
5885 rec->getLocation());
5886 addr =
5887 Address(Builder.CreatePreserveUnionAccessIndex(
5888 addr.emitRawPointer(*this),
5889 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
5890 addr.getElementType(), addr.getAlignment());
5891 }
5892
5893 if (FieldType->isReferenceType())
5894 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5895 } else {
5896 if (!IsInPreservedAIRegion &&
5897 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
5898 // For structs, we GEP to the field that the record layout suggests.
5899 addr = emitAddrOfFieldStorage(*this, addr, field, IsInBounds);
5900 else
5901 // Remember the original struct field index
5902 addr = emitPreserveStructAccess(*this, base, addr, field);
5903 }
5904
5905 // If this is a reference field, load the reference right now.
5906 if (FieldType->isReferenceType()) {
5907 LValue RefLVal =
5908 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5909 if (RecordCVR & Qualifiers::Volatile)
5910 RefLVal.getQuals().addVolatile();
5911 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
5912
5913 // Qualifiers on the struct don't apply to the referencee.
5914 RecordCVR = 0;
5915 FieldType = FieldType->getPointeeType();
5916 }
5917
5918 // Make sure that the address is pointing to the right type. This is critical
5919 // for both unions and structs.
5920 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5921
5922 if (field->hasAttr<AnnotateAttr>())
5923 addr = EmitFieldAnnotations(field, addr);
5924
5925 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5926 LV.getQuals().addCVRQualifiers(RecordCVR);
5927
5928 // __weak attribute on a field is ignored.
// NOTE(review): orig. 5929-5930 (presumably clearing the weak qualifier)
// are elided by this listing.
5931
5932 return LV;
5933}
5934
// Emits an lvalue for initializing a field: unlike EmitLValueForField, a
// reference-typed field yields the address of the reference slot itself
// (so the reference can be bound), not the referenced object.
5935LValue
// NOTE(review): orig. 5936 (the first line of the qualified name/parameters)
// is elided by this listing; visible parameters end with
// (…Base, const FieldDecl *Field).
5937 const FieldDecl *Field) {
5938 QualType FieldType = Field->getType();
5939
// Non-reference fields initialize through the ordinary field lvalue path.
5940 if (!FieldType->isReferenceType())
5941 return EmitLValueForField(Base, Field);
5942
// NOTE(review): orig. 5943 (the declaration/call opener producing V,
// presumably Address V = emitAddrOfFieldStorage() is elided by this listing.
5944 *this, Base.getAddress(), Field,
5945 /*IsInBounds=*/!getLangOpts().PointerOverflowDefined);
5946
5947 // Make sure that the address is pointing to the right type.
5948 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
5949 V = V.withElementType(llvmType);
5950
5951 // TODO: Generate TBAA information that describes this access as a structure
5952 // member access and not just an access to an object of the field's type. This
5953 // should be similar to what we do in EmitLValueForField().
5954 LValueBaseInfo BaseInfo = Base.getBaseInfo();
5955 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
5956 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
5957 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
5958 CGM.getTBAAInfoForSubobject(Base, FieldType));
5959}
5960
// NOTE(review): the defining signature (orig. 5960-5961) is elided by this
// listing; from the body this is presumably
// LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E).
// File-scope literals become globals; block-scope literals get a stack
// temporary, are initialized in place, and (in C) get a scope-end destructor.
5962 if (E->isFileScope()) {
5963 ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
5964 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
5965 }
5966 if (E->getType()->isVariablyModifiedType())
5967 // make sure to emit the VLA size.
// NOTE(review): orig. 5968 (the EmitVariablyModifiedType call) is elided
// by this listing.
5969
5970 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
5971 const Expr *InitExpr = E->getInitializer();
// NOTE(review): orig. 5972 (the construction of Result from DeclPtr) is
// elided by this listing.
5973
5974 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
5975 /*Init*/ true);
5976
5977 // Block-scope compound literals are destroyed at the end of the enclosing
5978 // scope in C.
5979 if (!getLangOpts().CPlusPlus)
// NOTE(review): orig. 5980-5981 (the destructor-kind check and the
// pushLifetimeExtendedDestroy-style call opener) are elided by this listing.
5982 E->getType(), getDestroyer(DtorKind),
5983 DtorKind & EHCleanup);
5984
5985 return Result;
5986}
5987
// NOTE(review): the defining signature (orig. 5988) is elided by this
// listing; presumably LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E).
5989 if (!E->isGLValue())
5990 // Initializing an aggregate temporary in C++11: T{...}.
5991 return EmitAggExprToLValue(E);
5992
5993 // An lvalue initializer list must be initializing a reference.
5994 assert(E->isTransparent() && "non-transparent glvalue init list");
5995 return EmitLValue(E->getInit(0));
5996}
5997
5998/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5999/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
6000/// LValue is returned and the current block has been terminated.
6001static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
6002 const Expr *Operand) {
// A throw terminates the current block; signal "no lvalue" with nullopt
// so the caller skips the branch to the continuation block.
6003 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
6004 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
6005 return std::nullopt;
6006 }
6007
6008 return CGF.EmitLValue(Operand);
6009}
6010
6011namespace {
6012// Handle the case where the condition is a constant evaluatable simple integer,
6013// which means we don't have to separately handle the true/false blocks.
// Returns the lvalue of the statically-live arm, or nullopt when the
// condition does not constant-fold (or the dead arm contains a label and so
// cannot be discarded).
6014std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
6015 CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
6016 const Expr *condExpr = E->getCond();
6017 bool CondExprBool;
6018 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
6019 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
6020 if (!CondExprBool)
6021 std::swap(Live, Dead);
6022
6023 if (!CGF.ContainsLabel(Dead)) {
6024 // If the true case is live, we need to track its region.
6025 CGF.incrementProfileCounter(CondExprBool ? CGF.UseExecPath
6026 : CGF.UseSkipPath,
6027 E, /*UseBoth=*/true);
6028 CGF.markStmtMaybeUsed(Dead);
6029 // If a throw expression we emit it and return an undefined lvalue
6030 // because it can't be used.
6031 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
6032 CGF.EmitCXXThrowExpr(ThrowExpr);
6033 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
6034 llvm::Type *Ty = CGF.DefaultPtrTy;
6035 return CGF.MakeAddrLValue(
6036 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
6037 Dead->getType());
6038 }
6039 return CGF.EmitLValue(Live);
6040 }
6041 }
6042 return std::nullopt;
6043}
// Result of emitting both arms of a glvalue conditional: the final basic
// block of each arm plus its lvalue (nullopt when the arm was a throw).
6044struct ConditionalInfo {
6045 llvm::BasicBlock *lhsBlock, *rhsBlock;
6046 std::optional<LValue> LHS, RHS;
6047};
6048
6049// Create and generate the 3 blocks for a conditional operator.
6050// Leaves the 'current block' in the continuation basic block.
6051template<typename FuncTy>
6052ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
6053 const AbstractConditionalOperator *E,
6054 const FuncTy &BranchGenFunc) {
6055 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
6056 CGF.createBasicBlock("cond.false"), std::nullopt,
6057 std::nullopt};
6058 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
6059
// NOTE(review): orig. 6060, 6066 and 6077 are elided by this listing —
// presumably a ConditionalEvaluation declaration (`eval`) used by the
// eval.begin/eval.end calls below; confirm against the full source.
6061 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
6062 CGF.getProfileCount(E));
6063
6064 // Any temporaries created here are conditional.
6065 CGF.EmitBlock(Info.lhsBlock);
6067 eval.begin(CGF);
6068 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
6069 eval.end(CGF);
// Re-read the insert block: BranchGenFunc may have emitted more blocks.
6070 Info.lhsBlock = CGF.Builder.GetInsertBlock();
6071
// A throw arm (nullopt) already terminated its block; only branch to the
// continuation when there is a value.
6072 if (Info.LHS)
6073 CGF.Builder.CreateBr(endBlock);
6074
6075 // Any temporaries created here are conditional.
6076 CGF.EmitBlock(Info.rhsBlock);
6078 eval.begin(CGF);
6079 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
6080 eval.end(CGF);
6081 Info.rhsBlock = CGF.Builder.GetInsertBlock();
6082 CGF.EmitBlock(endBlock);
6083
6084 return Info;
6085}
6086} // namespace
6087
// NOTE(review): the first signature line (orig. 6088) is elided by this
// listing; from the body this is presumably
// void CodeGenFunction::EmitIgnoredConditionalOperator(
//     const AbstractConditionalOperator *E) — TODO confirm.
// Emits a conditional operator whose result is unused: both arms are
// emitted for side effects only.
6089 const AbstractConditionalOperator *E) {
6090 if (!E->isGLValue()) {
6091 // ?: here should be an aggregate.
6092 assert(hasAggregateEvaluationKind(E->getType()) &&
6093 "Unexpected conditional operator!");
6094 return (void)EmitAggExprToLValue(E);
6095 }
6096
6097 OpaqueValueMapping binding(*this, E);
// Constant-foldable condition: the simple-case helper already emitted the
// live arm; nothing further to do.
6098 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
6099 return;
6100
6101 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
6102 CGF.EmitIgnoredExpr(E);
6103 return LValue{};
6104 });
6105}
// NOTE(review): the defining signature (orig. 6106-6107) is elided by this
// listing; from the body this is presumably
// LValue CodeGenFunction::EmitConditionalOperatorLValue(
//     const AbstractConditionalOperator *expr) — TODO confirm.
// Emits a glvalue conditional operator, merging the two arms' addresses
// (via a PHI built by the elided call at orig. 6132) and their TBAA info.
6108 if (!expr->isGLValue()) {
6109 // ?: here should be an aggregate.
6110 assert(hasAggregateEvaluationKind(expr->getType()) &&
6111 "Unexpected conditional operator!");
6112 return EmitAggExprToLValue(expr);
6113 }
6114
6115 OpaqueValueMapping binding(*this, expr);
6116 if (std::optional<LValue> Res =
6117 HandleConditionalOperatorLValueSimpleCase(*this, expr))
6118 return *Res;
6119
6120 ConditionalInfo Info = EmitConditionalBlocks(
6121 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
6122 return EmitLValueOrThrowExpression(CGF, E);
6123 });
6124
// Only simple (plain-address) lvalues can be merged through a PHI.
6125 if ((Info.LHS && !Info.LHS->isSimple()) ||
6126 (Info.RHS && !Info.RHS->isSimple()))
6127 return EmitUnsupportedLValue(expr, "conditional operator");
6128
6129 if (Info.LHS && Info.RHS) {
6130 Address lhsAddr = Info.LHS->getAddress();
6131 Address rhsAddr = Info.RHS->getAddress();
// NOTE(review): orig. 6132 (the call opener producing `result`, presumably
// an address-merging helper building a PHI) is elided by this listing.
6133 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
6134 Builder.GetInsertBlock(), expr->getType());
// Take the weaker (larger-enum) alignment source of the two arms.
6135 AlignmentSource alignSource =
6136 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
6137 Info.RHS->getBaseInfo().getAlignmentSource());
6138 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
6139 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
6140 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
6141 TBAAInfo);
6142 } else {
// One arm threw: its block is terminated, so the surviving arm's lvalue
// is the whole expression's value.
6143 assert((Info.LHS || Info.RHS) &&
6144 "both operands of glvalue conditional are throw-expressions?");
6145 return Info.LHS ? *Info.LHS : *Info.RHS;
6146 }
6147}
6148
6149/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
6150/// type. If the cast is to a reference, we can have the usual lvalue result,
6151/// otherwise if a cast is needed by the code generator in an lvalue context,
6152/// then it must mean that we need the address of an aggregate in order to
6153/// access one of its members. This can happen for all the reasons that casts
6154/// are permitted with aggregate result, including noop aggregate casts, and
6155/// cast from scalar to union.
6157 llvm::scope_exit RestoreCurCast([this, Prev = CurCast] { CurCast = Prev; });
6158 CurCast = E;
6159 switch (E->getCastKind()) {
6160 case CK_ToVoid:
6161 case CK_BitCast:
6162 case CK_LValueToRValueBitCast:
6163 case CK_ArrayToPointerDecay:
6164 case CK_FunctionToPointerDecay:
6165 case CK_NullToMemberPointer:
6166 case CK_NullToPointer:
6167 case CK_IntegralToPointer:
6168 case CK_PointerToIntegral:
6169 case CK_PointerToBoolean:
6170 case CK_IntegralCast:
6171 case CK_BooleanToSignedIntegral:
6172 case CK_IntegralToBoolean:
6173 case CK_IntegralToFloating:
6174 case CK_FloatingToIntegral:
6175 case CK_FloatingToBoolean:
6176 case CK_FloatingCast:
6177 case CK_FloatingRealToComplex:
6178 case CK_FloatingComplexToReal:
6179 case CK_FloatingComplexToBoolean:
6180 case CK_FloatingComplexCast:
6181 case CK_FloatingComplexToIntegralComplex:
6182 case CK_IntegralRealToComplex:
6183 case CK_IntegralComplexToReal:
6184 case CK_IntegralComplexToBoolean:
6185 case CK_IntegralComplexCast:
6186 case CK_IntegralComplexToFloatingComplex:
6187 case CK_DerivedToBaseMemberPointer:
6188 case CK_BaseToDerivedMemberPointer:
6189 case CK_MemberPointerToBoolean:
6190 case CK_ReinterpretMemberPointer:
6191 case CK_AnyPointerToBlockPointerCast:
6192 case CK_ARCProduceObject:
6193 case CK_ARCConsumeObject:
6194 case CK_ARCReclaimReturnedObject:
6195 case CK_ARCExtendBlockObject:
6196 case CK_CopyAndAutoreleaseBlockObject:
6197 case CK_IntToOCLSampler:
6198 case CK_FloatingToFixedPoint:
6199 case CK_FixedPointToFloating:
6200 case CK_FixedPointCast:
6201 case CK_FixedPointToBoolean:
6202 case CK_FixedPointToIntegral:
6203 case CK_IntegralToFixedPoint:
6204 case CK_MatrixCast:
6205 case CK_HLSLVectorTruncation:
6206 case CK_HLSLMatrixTruncation:
6207 case CK_HLSLArrayRValue:
6208 case CK_HLSLElementwiseCast:
6209 case CK_HLSLAggregateSplatCast:
6210 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
6211
6212 case CK_Dependent:
6213 llvm_unreachable("dependent cast kind in IR gen!");
6214
6215 case CK_BuiltinFnToFnPtr:
6216 llvm_unreachable("builtin functions are handled elsewhere");
6217
6218 // These are never l-values; just use the aggregate emission code.
6219 case CK_NonAtomicToAtomic:
6220 case CK_AtomicToNonAtomic:
6221 return EmitAggExprToLValue(E);
6222
6223 case CK_Dynamic: {
6224 LValue LV = EmitLValue(E->getSubExpr());
6225 Address V = LV.getAddress();
6226 const auto *DCE = cast<CXXDynamicCastExpr>(E);
6228 }
6229
6230 case CK_ConstructorConversion:
6231 case CK_UserDefinedConversion:
6232 case CK_CPointerToObjCPointerCast:
6233 case CK_BlockPointerToObjCPointerCast:
6234 case CK_LValueToRValue:
6235 return EmitLValue(E->getSubExpr());
6236
6237 case CK_NoOp: {
6238 // CK_NoOp can model a qualification conversion, which can remove an array
6239 // bound and change the IR type.
6240 // FIXME: Once pointee types are removed from IR, remove this.
6241 LValue LV = EmitLValue(E->getSubExpr());
6242 // Propagate the volatile qualifer to LValue, if exist in E.
6244 LV.getQuals() = E->getType().getQualifiers();
6245 if (LV.isSimple()) {
6246 Address V = LV.getAddress();
6247 if (V.isValid()) {
6248 llvm::Type *T = ConvertTypeForMem(E->getType());
6249 if (V.getElementType() != T)
6250 LV.setAddress(V.withElementType(T));
6251 }
6252 }
6253 return LV;
6254 }
6255
6256 case CK_UncheckedDerivedToBase:
6257 case CK_DerivedToBase: {
6258 auto *DerivedClassDecl = E->getSubExpr()->getType()->castAsCXXRecordDecl();
6259 LValue LV = EmitLValue(E->getSubExpr());
6260 Address This = LV.getAddress();
6261
6262 // Perform the derived-to-base conversion
6264 This, DerivedClassDecl, E->path_begin(), E->path_end(),
6265 /*NullCheckValue=*/false, E->getExprLoc());
6266
6267 // TODO: Support accesses to members of base classes in TBAA. For now, we
6268 // conservatively pretend that the complete object is of the base class
6269 // type.
6270 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
6271 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6272 }
6273 case CK_ToUnion:
6274 return EmitAggExprToLValue(E);
6275 case CK_BaseToDerived: {
6276 auto *DerivedClassDecl = E->getType()->castAsCXXRecordDecl();
6277 LValue LV = EmitLValue(E->getSubExpr());
6278
6279 // Perform the base-to-derived conversion
6281 LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
6282 /*NullCheckValue=*/false);
6283
6284 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
6285 // performed and the object is not of the derived type.
6288 E->getType());
6289
6290 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
6291 EmitVTablePtrCheckForCast(E->getType(), Derived,
6292 /*MayBeNull=*/false, CFITCK_DerivedCast,
6293 E->getBeginLoc());
6294
6295 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
6296 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6297 }
6298 case CK_LValueBitCast: {
6299 // This must be a reinterpret_cast (or c-style equivalent).
6300 const auto *CE = cast<ExplicitCastExpr>(E);
6301
6302 CGM.EmitExplicitCastExprType(CE, this);
6303 LValue LV = EmitLValue(E->getSubExpr());
6305 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
6306
6307 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
6309 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
6310 E->getBeginLoc());
6311
6312 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
6313 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6314 }
6315 case CK_AddressSpaceConversion: {
6316 LValue LV = EmitLValue(E->getSubExpr());
6317 QualType DestTy = getContext().getPointerType(E->getType());
6318 llvm::Value *V =
6319 performAddrSpaceCast(LV.getPointer(*this), ConvertType(DestTy));
6321 LV.getAddress().getAlignment()),
6322 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
6323 }
6324 case CK_ObjCObjectLValueCast: {
6325 LValue LV = EmitLValue(E->getSubExpr());
6327 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
6328 CGM.getTBAAInfoForSubobject(LV, E->getType()));
6329 }
6330 case CK_ZeroToOCLOpaqueType:
6331 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
6332
6333 case CK_VectorSplat: {
6334 // LValue results of vector splats are only supported in HLSL.
6335 if (!getLangOpts().HLSL)
6336 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
6337 return EmitLValue(E->getSubExpr());
6338 }
6339 }
6340
6341 llvm_unreachable("Unhandled lvalue cast kind?");
6342}
6343
6348
// Emit the lvalues for an HLSL out/inout argument: the caller's argument
// lvalue (BaseLV) and a fresh temporary (TempLV) the callee writes through.
// NOTE(review): original lines 6350/6353/6360/6363 are elided in this excerpt
// (function signature and the copy-in store body); transcribed as-is.
6349std::pair<LValue, LValue>
6351 // Emitting the casted temporary through an opaque value.
6352 LValue BaseLV = EmitLValue(E->getArgLValue());
6354
6355 QualType ExprTy = E->getType();
6356 Address OutTemp = CreateIRTempWithoutCast(ExprTy);
6357 LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);
6358
// For inout arguments the current value is copied into the temporary first
// (the initializing emission on the elided line 6360 completes this call).
6359 if (E->isInOut())
6361 TempLV);
6362
6364 return std::make_pair(BaseLV, TempLV);
6365}
6366
// Emit an HLSL out/inout call argument: materialize the temporary, register a
// writeback to the original lvalue, and add the temporary's address to Args.
// NOTE(review): the signature line (6367) and line 6375 are elided here.
6368 CallArgList &Args, QualType Ty) {
6369
6370 auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
6371
6372 llvm::Value *Addr = TempLV.getAddress().getBasePointer();
6373 llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
6374
6376
// The writeback copies the temporary back to BaseLV after the call, applying
// the recorded writeback cast if the argument required a conversion.
6377 Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
6378 Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast());
6379 Args.add(RValue::get(TmpAddr, *this), Ty);
6380 return TempLV;
6381}
6382
// Look up the lvalue previously bound for an OpaqueValueExpr; if none is
// recorded, the OVE must be "unique" and we emit its source expression
// directly. NOTE(review): lines 6384-6385 (signature) are elided here.
6383LValue
6386
6387 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
6388 it = OpaqueLValues.find(e);
6389
6390 if (it != OpaqueLValues.end())
6391 return it->second;
6392
// A non-unique OVE must have been bound earlier; unique ones can be emitted
// on demand from their source expression.
6393 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
6394 return EmitLValue(e->getSourceExpr());
6395}
6396
// RValue counterpart of the opaque-value lookup above: return the bound
// rvalue, or emit the unique OVE's source expression on demand.
// NOTE(review): lines 6398-6399 (signature) are elided here.
6397RValue
6400
6401 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
6402 it = OpaqueRValues.find(e);
6403
6404 if (it != OpaqueRValues.end())
6405 return it->second;
6406
6407 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
6408 return EmitAnyExpr(e->getSourceExpr());
6409}
6410
// Whether a mapping for this OVE has already been recorded.
// NOTE(review): lines 6411-6412 (signature and the guard that selects between
// the two maps, presumably on shouldBindAsLValue) are elided; as transcribed
// the second return looks unreachable, but it is the else-branch.
6413 return OpaqueLValues.contains(E);
6414 return OpaqueRValues.contains(E);
6415}
6416
// Load one field of LV as an RValue, dispatching on the field's evaluation
// kind. Used for member-by-member copies, so reference fields yield their
// pointer rather than being loaded through.
// NOTE(review): line 6417 (start of the signature) is elided here.
6418 const FieldDecl *FD,
6419 SourceLocation Loc) {
6420 QualType FT = FD->getType();
6421 LValue FieldLV = EmitLValueForField(LV, FD);
6422 switch (getEvaluationKind(FT)) {
6423 case TEK_Complex:
6424 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
6425 case TEK_Aggregate:
6426 return FieldLV.asAggregateRValue();
6427 case TEK_Scalar:
6428 // This routine is used to load fields one-by-one to perform a copy, so
6429 // don't load reference fields.
6430 if (FD->getType()->isReferenceType())
6431 return RValue::get(FieldLV.getPointer(*this));
6432 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
6433 // primitive load.
6434 if (FieldLV.isBitField())
6435 return EmitLoadOfLValue(FieldLV, Loc);
6436 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
6437 }
6438 llvm_unreachable("bad evaluation kind");
6439}
6440
6441//===--------------------------------------------------------------------===//
6442// Expression Emission
6443//===--------------------------------------------------------------------===//
6444
// Top-level CallExpr emission: classify the call (block, C++ member, CUDA
// kernel, operator-on-implicit-object, builtin, pseudo-destructor) and route
// to the specialized emitter, otherwise fall through to the generic EmitCall.
// NOTE(review): lines 6445-6446 (signature) and 6487 (the pseudo-destructor
// return) are elided in this excerpt.
6447 llvm::CallBase **CallOrInvoke) {
6448 llvm::CallBase *CallOrInvokeStorage;
6449 if (!CallOrInvoke) {
6450 CallOrInvoke = &CallOrInvokeStorage;
6451 }
6452
// Runs after any of the returns below: tag the emitted call/invoke so the
// coroutine elision pass knows it is safe.
6453 llvm::scope_exit AddCoroElideSafeOnExit([&] {
6454 if (E->isCoroElideSafe()) {
6455 auto *I = *CallOrInvoke;
6456 if (I)
6457 I->addFnAttr(llvm::Attribute::CoroElideSafe);
6458 }
6459 });
6460
6461 // Builtins never have block type.
6462 if (E->getCallee()->getType()->isBlockPointerType())
6463 return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
6464
6465 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
6466 return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);
6467
6468 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
6469 return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);
6470
6471 // A CXXOperatorCallExpr is created even for explicit object methods, but
6472 // these should be treated like static function call.
6473 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
6474 if (const auto *MD =
6475 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
6476 MD && MD->isImplicitObjectMemberFunction())
6477 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);
6478
6479 CGCallee callee = EmitCallee(E->getCallee());
6480
6481 if (callee.isBuiltin()) {
6482 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
6483 E, ReturnValue);
6484 }
6485
6486 if (callee.isPseudoDestructor()) {
6488 }
6489
6490 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
6491 /*Chain=*/nullptr, CallOrInvoke);
6492}
6493
6494/// Emit a CallExpr without considering whether it might be a subclass.
// NOTE(review): lines 6495-6496 (signature) are elided in this excerpt.
6497 llvm::CallBase **CallOrInvoke) {
6498 CGCallee Callee = EmitCallee(E->getCallee());
6499 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
6500 /*Chain=*/nullptr, CallOrInvoke);
6501}
6502
6503// Detect the unusual situation where an inline version is shadowed by a
6504// non-inline version. In that case we should pick the external one
6505// everywhere. That's GCC behavior too.
// Returns true only when every redeclaration in the chain is an inline
// builtin declaration. NOTE(review): line 6506 (signature) is elided here.
6507 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
6508 if (!PD->isInlineBuiltinDeclaration())
6509 return false;
6510 return true;
6511}
6512
// Build the CGCallee for a direct call to GD: redirect inline builtins to a
// mangled ".inline" clone, fold recognized builtins to CGCallee::forBuiltin,
// and substitute the kernel stub for CUDA host-side kernel references.
// NOTE(review): lines 6513 (signature), 6524 and 6532 are elided here.
6514 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
6515
6516 if (auto builtinID = FD->getBuiltinID()) {
6517 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
6518 std::string NoBuiltins = "no-builtins";
6519
6520 StringRef Ident = CGF.CGM.getMangledName(GD);
6521 std::string FDInlineName = (Ident + ".inline").str();
6522
6523 bool IsPredefinedLibFunction =
6525 bool HasAttributeNoBuiltin =
6526 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
6527 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
6528
6529 // When directing calling an inline builtin, call it through it's mangled
6530 // name to make it clear it's not the actual builtin.
// (Condition continues on elided line 6532.)
6531 if (CGF.CurFn->getName() != FDInlineName &&
6533 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
6534 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
6535 llvm::Module *M = Fn->getParent();
// Create the internal always-inline clone lazily, once per module.
6536 llvm::Function *Clone = M->getFunction(FDInlineName);
6537 if (!Clone) {
6538 Clone = llvm::Function::Create(Fn->getFunctionType(),
6539 llvm::GlobalValue::InternalLinkage,
6540 Fn->getAddressSpace(), FDInlineName, M);
6541 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
6542 }
6543 return CGCallee::forDirect(Clone, GD);
6544 }
6545
6546 // Replaceable builtins provide their own implementation of a builtin. If we
6547 // are in an inline builtin implementation, avoid trivial infinite
6548 // recursion. Honor __attribute__((no_builtin("foo"))) or
6549 // __attribute__((no_builtin)) on the current function unless foo is
6550 // not a predefined library function which means we must generate the
6551 // builtin no matter what.
6552 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
6553 return CGCallee::forBuiltin(builtinID, FD);
6554 }
6555
6556 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
// On the CUDA host side, a reference to a __global__ function resolves to
// its kernel stub rather than the device function itself.
6557 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
6558 FD->hasAttr<CUDAGlobalAttr>())
6559 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
6560 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
6561
6562 return CGCallee::forDirect(CalleePtr, GD);
6563}
6564
// Map a FunctionDecl to the GlobalDecl used for a direct call; OpenCL-spelled
// device kernels take the branch on the elided line 6567 (presumably
// returning a kernel-stub GlobalDecl — confirm against full source).
// NOTE(review): lines 6565 (signature) and 6567 are elided here.
6566 if (DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
6568 return GlobalDecl(FD);
6569}
6570
// Resolve a callee expression to a CGCallee: strip decay casts, preserve
// __ptrauth info on function-pointer loads, resolve direct callees through
// DeclRefExpr/MemberExpr, look through template substitutions, and fall back
// to an indirect callee. NOTE(review): lines 6571 (signature), 6587, 6589,
// 6597, 6606, 6620 and 6625 are elided in this excerpt.
6572 E = E->IgnoreParens();
6573
6574 // Look through function-to-pointer decay.
6575 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
6576 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
6577 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
6578 return EmitCallee(ICE->getSubExpr());
6579 }
6580
6581 // Try to remember the original __ptrauth qualifier for loads of
6582 // function pointers.
6583 if (ICE->getCastKind() == CK_LValueToRValue) {
6584 const Expr *SubExpr = ICE->getSubExpr();
6585 if (const auto *PtrType = SubExpr->getType()->getAs<PointerType>()) {
6586 std::pair<llvm::Value *, CGPointerAuthInfo> Result =
6588
6590 assert(FunctionType->isFunctionType());
6591
6592 GlobalDecl GD;
6593 if (const auto *VD =
6594 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee())) {
6595 GD = GlobalDecl(VD);
6596 }
6598 CGCallee Callee(CalleeInfo, Result.first, Result.second);
6599 return Callee;
6600 }
6601 }
6602
6603 // Resolve direct calls.
6604 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
6605 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
6607 }
6608 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
6609 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
// The base is still evaluated for side effects even though the call is
// resolved statically.
6610 EmitIgnoredExpr(ME->getBase());
6611 return EmitDirectCallee(*this, FD);
6612 }
6613
6614 // Look through template substitutions.
6615 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
6616 return EmitCallee(NTTP->getReplacement());
6617
6618 // Treat pseudo-destructor calls differently.
6619 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
6621 }
6622
6623 // Otherwise, we have an indirect reference.
6624 llvm::Value *calleePtr;
6626 if (auto ptrType = E->getType()->getAs<PointerType>()) {
6627 calleePtr = EmitScalarExpr(E);
6628 functionType = ptrType->getPointeeType();
6629 } else {
6630 functionType = E->getType();
6631 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
6632 }
6633 assert(functionType->isFunctionType());
6634
6635 GlobalDecl GD;
6636 if (const auto *VD =
6637 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
6638 GD = GlobalDecl(VD);
6639
6640 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
6641 CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
6642 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
6643 return callee;
6644}
6645
// Emit a binary operator as an lvalue. Comma yields the RHS lvalue,
// pointer-to-member dispatches to the member emitter, and everything else
// must be an assignment, handled per evaluation kind of the result type.
// NOTE(review): numerous original lines are elided in this excerpt (6646
// signature, 6650, 6656, 6667, 6676, 6687, 6690, 6694-6696, 6710, 6715,
// 6718, 6725, 6727, 6745, 6751); transcribed as-is.
6647 // Comma expressions just emit their LHS then their RHS as an l-value.
6648 if (E->getOpcode() == BO_Comma) {
6649 EmitIgnoredExpr(E->getLHS());
6651 return EmitLValue(E->getRHS());
6652 }
6653
6654 if (E->getOpcode() == BO_PtrMemD ||
6655 E->getOpcode() == BO_PtrMemI)
6657
6658 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
6659
6660 // Create a Key Instructions source location atom group that covers both
6661 // LHS and RHS expressions. Nested RHS expressions may get subsequently
6662 // separately grouped (1 below):
6663 //
6664 // 1. `a = b = c` -> Two atoms.
6665 // 2. `x = new(1)` -> One atom (for both addr store and value store).
6666 // 3. Complex and agg assignment -> One atom.
6668
6669 // Note that in all of these cases, __block variables need the RHS
6670 // evaluated first just in case the variable gets moved by the RHS.
6671
6672 switch (getEvaluationKind(E->getType())) {
6673 case TEK_Scalar: {
// __ptrauth-qualified scalar assignment: sign the RHS for the destination
// qualifier and store through an unqualified copy of the LHS lvalue.
6674 if (PointerAuthQualifier PtrAuth =
6675 E->getLHS()->getType().getPointerAuth()) {
6677 LValue CopiedLV = LV;
6678 CopiedLV.getQuals().removePointerAuth();
6679 llvm::Value *RV =
6680 EmitPointerAuthQualify(PtrAuth, E->getRHS(), CopiedLV.getAddress());
6681 EmitNullabilityCheck(CopiedLV, RV, E->getExprLoc());
6682 EmitStoreThroughLValue(RValue::get(RV), CopiedLV);
6683 return LV;
6684 }
6685
// ARC ownership qualifiers get dedicated store paths.
6686 switch (E->getLHS()->getType().getObjCLifetime()) {
6688 return EmitARCStoreStrong(E, /*ignored*/ false).first;
6691 return EmitARCStoreAutoreleasing(E).first;
6693 // No reason to do any of these differently.
6697 break;
6698 }
6699
6700 // TODO: Can we de-duplicate this code with the corresponding code in
6701 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
6702 RValue RV;
6703 llvm::Value *Previous = nullptr;
6704 QualType SrcType = E->getRHS()->getType();
6705 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
6706 // we want to extract that value and potentially (if the bitfield sanitizer
6707 // is enabled) use it to check for an implicit conversion.
6708 if (E->getLHS()->refersToBitField()) {
6709 llvm::Value *RHS =
6711 RV = RValue::get(RHS);
6712 } else
6713 RV = EmitAnyExpr(E->getRHS());
6716
6717 if (RV.isScalar())
6719
6720 if (LV.isBitField()) {
6721 llvm::Value *Result = nullptr;
6722 // If bitfield sanitizers are enabled we want to use the result
6723 // to check whether a truncation or sign change has occurred.
6724 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
6726 else
6728
6729 // If the expression contained an implicit conversion, make sure
6730 // to use the value before the scalar conversion.
6731 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
6732 QualType DstType = E->getLHS()->getType();
6733 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
6734 LV.getBitFieldInfo(), E->getExprLoc());
6735 } else
6736 EmitStoreThroughLValue(RV, LV);
6737
6738 if (getLangOpts().OpenMP)
6739 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
6740 E->getLHS());
6741 return LV;
6742 }
6743
6744 case TEK_Complex:
6746
6747 case TEK_Aggregate:
6748 // If the lang opt is HLSL and the LHS is a constant array
6749 // then we are performing a copy assignment and call a special
6750 // function because EmitAggExprToLValue emits to a temporary LValue
6752 return EmitHLSLArrayAssignLValue(E);
6754 return EmitAggExprToLValue(E);
6755 }
6756 llvm_unreachable("bad evaluation kind");
6757}
6758
6759// This function implements trivial copy assignment for HLSL's
6760// assignable constant arrays.
// NOTE(review): lines 6761 (signature), 6767 and 6774 (the
// EmitInitializationToLValue call) are elided in this excerpt.
6762 // Don't emit an LValue for the RHS because it might not be an LValue
6763 LValue LHS = EmitLValue(E->getLHS());
6764
6765 // If the RHS is a global resource array, copy all individual resources
6766 // into LHS.
6768 if (CGM.getHLSLRuntime().emitResourceArrayCopy(LHS, E->getRHS(), *this))
6769 return LHS;
6770
6771 // In C the RHS of an assignment operator is an RValue.
6772 // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
6773 // EmitInitializationToLValue to emit an RValue into an LValue.
6775 return LHS;
6776}
6777
// Emit a call expression as an lvalue: aggregate returns live in a temporary
// whose address we wrap; scalar returns must be references.
// NOTE(review): lines 6778 (signature), 6784 and 6790 are elided here.
6779 llvm::CallBase **CallOrInvoke) {
6780 RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);
6782 if (!RV.isScalar())
6783 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6785
6786 assert(E->getCallReturnType(getContext())->isReferenceType() &&
6787 "Can't have a scalar return unless the return type is a "
6788 "reference type!");
6789
6791}
6792
// Emit a va_arg expression as an lvalue by spilling it to a temporary.
// NOTE(review): line 6793 (signature) is elided here.
6794 // FIXME: This shouldn't require another copy.
6795 return EmitAggExprToLValue(E);
6796}
6797
// Emit a CXXConstructExpr as an lvalue by constructing into a temporary slot.
// NOTE(review): lines 6798-6799 (signature/assert head) and 6803 (the final
// MakeAddrLValue return, presumably) are elided in this excerpt.
6800 && "binding l-value to type which needs a temporary");
6801 AggValueSlot Slot = CreateAggTemp(E->getType());
6802 EmitCXXConstructExpr(E, Slot);
6804}
6805
// Heavily elided fragment: lines 6807-6811 are missing in this excerpt
// (signature(s) of the lvalue emitters here). The visible body wraps the
// address of the MS __uuidof GUID declaration, retyped to the expression
// type. Verify boundaries against the full source.
6806LValue
6810
6812 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
6813 .withElementType(ConvertType(E->getType()));
6814}
6815
6820
// Objective-C message send as an lvalue: aggregate results come back in a
// temporary; scalar results must be reference-typed returns.
// NOTE(review): lines 6822-6828 and 6830-6831 (signature and the message
// emission) are elided; lines 6835 and 6841 are also missing.
6821LValue
6829
6832
6833 if (!RV.isScalar())
6834 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6836
6837 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
6838 "Can't have a scalar return unless the return type is a "
6839 "reference type!");
6840
6842}
6843
// Wrap the runtime's selector address as an lvalue.
// NOTE(review): lines 6844 (signature) and 6847 (the MakeAddrLValue return,
// presumably) are elided here.
6845 Address V =
6846 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
6848}
6849
// Delegate ivar-offset computation to the Objective-C runtime.
// NOTE(review): line 6850 (start of the signature) is elided here.
6851 const ObjCIvarDecl *Ivar) {
6852 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
6853}
6854
// Same as EmitIvarOffset but coerced to the target's ptrdiff_t width.
// NOTE(review): line 6856 (middle of the signature) is elided here.
6855llvm::Value *
6857 const ObjCIvarDecl *Ivar) {
6858 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
6859 QualType PointerDiffType = getContext().getPointerDiffType();
6860 return Builder.CreateZExtOrTrunc(OffsetValue,
6861 getTypes().ConvertType(PointerDiffType));
6862}
6863
// Delegate ivar lvalue formation to the Objective-C runtime.
// NOTE(review): line 6864 (start of the signature) is elided here.
6865 llvm::Value *BaseValue,
6866 const ObjCIvarDecl *Ivar,
6867 unsigned CVRQualifiers) {
6868 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
6869 Ivar, CVRQualifiers);
6870}
6871
// Emit an ObjCIvarRefExpr lvalue: evaluate the base (pointer for arrows,
// lvalue otherwise) and hand off to EmitLValueForIvar with the base's CVR
// qualifiers. NOTE(review): lines 6872 (signature) and 6892 are elided here.
6873 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
6874 llvm::Value *BaseValue = nullptr;
6875 const Expr *BaseExpr = E->getBase();
6876 Qualifiers BaseQuals;
6877 QualType ObjectTy;
6878 if (E->isArrow()) {
6879 BaseValue = EmitScalarExpr(BaseExpr);
6880 ObjectTy = BaseExpr->getType()->getPointeeType();
6881 BaseQuals = ObjectTy.getQualifiers();
6882 } else {
6883 LValue BaseLV = EmitLValue(BaseExpr);
6884 BaseValue = BaseLV.getPointer(*this);
6885 ObjectTy = BaseExpr->getType();
6886 BaseQuals = ObjectTy.getQualifiers();
6887 }
6888
6889 LValue LV =
6890 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
6891 BaseQuals.getCVRQualifiers());
6893 return LV;
6894}
6895
// Emit the expression to a temporary and wrap its aggregate address as an
// lvalue. NOTE(review): lines 6896 (signature) and 6900 (the trailing
// MakeAddrLValue arguments) are elided here.
6897 // Can only get l-value for message expression returning aggregate type
6898 RValue RV = EmitAnyExprToTemp(E);
6899 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6901}
6902
// The generic call emitter: verifies the callee type, runs the -fsanitize
// function-signature and CFI indirect-call checks, evaluates arguments in
// the required order, applies HIP kernel-handle and OpenMP indirect-call
// lookups, and finally emits the call. NOTE(review): many original lines are
// elided in this excerpt (6903 signature, 6948, 7027-7028, 7049, 7053, 7062,
// 7095, among others); transcribed as-is.
6904 const CGCallee &OrigCallee, const CallExpr *E,
6906 llvm::Value *Chain,
6907 llvm::CallBase **CallOrInvoke,
6908 CGFunctionInfo const **ResolvedFnInfo) {
6909 // Get the actual function type. The callee type will always be a pointer to
6910 // function type or a block pointer type.
6911 assert(CalleeType->isFunctionPointerType() &&
6912 "Call must have function pointer type!");
6913
6914 const Decl *TargetDecl =
6915 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
6916
6917 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
6918 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
6919 "trying to emit a call to an immediate function");
6920
6921 CalleeType = getContext().getCanonicalType(CalleeType);
6922
6923 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
6924
6925 CGCallee Callee = OrigCallee;
6926
6927 bool CFIUnchecked = CalleeType->hasPointeeToCFIUncheckedCalleeFunctionType();
6928
// -fsanitize=function: compare the prefix signature/type hash stored before
// the callee's entry point against the expected hash for this call type.
6929 if (SanOpts.has(SanitizerKind::Function) &&
6930 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
6931 !isa<FunctionNoProtoType>(PointeeType) && !CFIUnchecked) {
6932 if (llvm::Constant *PrefixSig =
6933 CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
6934 auto CheckOrdinal = SanitizerKind::SO_Function;
6935 auto CheckHandler = SanitizerHandler::FunctionTypeMismatch;
6936 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6937 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
6938
6939 llvm::Type *PrefixSigType = PrefixSig->getType();
6940 llvm::StructType *PrefixStructTy = llvm::StructType::get(
6941 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
6942
6943 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6944 if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
6945 // Use raw pointer since we are using the callee pointer as data here.
6946 Address Addr =
6947 Address(CalleePtr, CalleePtr->getType(),
6949 CalleePtr->getPointerAlignment(CGM.getDataLayout())),
6950 Callee.getPointerAuthInfo(), nullptr);
6951 CalleePtr = Addr.emitRawPointer(*this);
6952 }
6953
6954 // On 32-bit Arm, the low bit of a function pointer indicates whether
6955 // it's using the Arm or Thumb instruction set. The actual first
6956 // instruction lives at the same address either way, so we must clear
6957 // that low bit before using the function address to find the prefix
6958 // structure.
6959 //
6960 // This applies to both Arm and Thumb target triples, because
6961 // either one could be used in an interworking context where it
6962 // might be passed function pointers of both types.
6963 llvm::Value *AlignedCalleePtr;
6964 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
6965 AlignedCalleePtr = Builder.CreateIntrinsic(
6966 CalleePtr->getType(), llvm::Intrinsic::ptrmask,
6967 {CalleePtr, llvm::ConstantInt::getSigned(IntPtrTy, ~1)});
6968 } else {
6969 AlignedCalleePtr = CalleePtr;
6970 }
6971
6972 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
6973 llvm::Value *CalleeSigPtr =
6974 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
6975 llvm::Value *CalleeSig =
6976 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
6977 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
6978
// Only check the type hash when the signature marker matched; otherwise the
// callee was not built with the prefix data at all.
6979 llvm::BasicBlock *Cont = createBasicBlock("cont");
6980 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
6981 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
6982
6983 EmitBlock(TypeCheck);
6984 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
6985 Int32Ty,
6986 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
6987 getPointerAlign());
6988 llvm::Value *CalleeTypeHashMatch =
6989 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
6990 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
6991 EmitCheckTypeDescriptor(CalleeType)};
6992 EmitCheck(std::make_pair(CalleeTypeHashMatch, CheckOrdinal), CheckHandler,
6993 StaticData, {CalleePtr});
6994
6995 Builder.CreateBr(Cont);
6996 EmitBlock(Cont);
6997 }
6998 }
6999
7000 const auto *FnType = cast<FunctionType>(PointeeType);
7001
7002 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
7003 FD && DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
7004 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FnType);
7005
7006 // If we are checking indirect calls and this call is indirect, check that the
7007 // function pointer is a member of the bit set for the function type.
7008 if (SanOpts.has(SanitizerKind::CFIICall) &&
7009 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) && !CFIUnchecked) {
7010 auto CheckOrdinal = SanitizerKind::SO_CFIICall;
7011 auto CheckHandler = SanitizerHandler::CFICheckFail;
7012 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
7013 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
7014
7015 llvm::Metadata *MD =
7016 CGM.CreateMetadataIdentifierForFnType(QualType(FnType, 0));
7017
7018 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
7019
7020 llvm::Value *CalleePtr = Callee.getFunctionPointer();
7021 llvm::Value *TypeTest = Builder.CreateCall(
7022 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
7023
7024 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
7025 llvm::Constant *StaticData[] = {
7026 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
7029 };
7030 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
7031 EmitCfiSlowPathCheck(CheckOrdinal, TypeTest, CrossDsoTypeId, CalleePtr,
7032 StaticData);
7033 } else {
7034 EmitCheck(std::make_pair(TypeTest, CheckOrdinal), CheckHandler,
7035 StaticData, {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
7036 }
7037 }
7038
7039 CallArgList Args;
7040 if (Chain)
7041 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
7042
7043 // C++17 requires that we evaluate arguments to a call using assignment syntax
7044 // right-to-left, and that we evaluate arguments to certain other operators
7045 // left-to-right. Note that we allow this to override the order dictated by
7046 // the calling convention on the MS ABI, which means that parameter
7047 // destruction order is not necessarily reverse construction order.
7048 // FIXME: Revisit this based on C++ committee response to unimplementability.
7050 bool StaticOperator = false;
7051 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
7052 if (OCE->isAssignmentOp())
7054 else {
7055 switch (OCE->getOperator()) {
7056 case OO_LessLess:
7057 case OO_GreaterGreater:
7058 case OO_AmpAmp:
7059 case OO_PipePipe:
7060 case OO_Comma:
7061 case OO_ArrowStar:
7063 break;
7064 default:
7065 break;
7066 }
7067 }
7068
7069 if (const auto *MD =
7070 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
7071 MD && MD->isStatic())
7072 StaticOperator = true;
7073 }
7074
7075 auto Arguments = E->arguments();
7076 if (StaticOperator) {
7077 // If we're calling a static operator, we need to emit the object argument
7078 // and ignore it.
7079 EmitIgnoredExpr(E->getArg(0));
7080 Arguments = drop_begin(Arguments, 1);
7081 }
7082 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
7083 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
7084
7085 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
7086 Args, FnType, /*ChainCall=*/Chain);
7087
7088 if (ResolvedFnInfo)
7089 *ResolvedFnInfo = &FnInfo;
7090
7091 // HIP function pointer contains kernel handle when it is used in triple
7092 // chevron. The kernel stub needs to be loaded from kernel handle and used
7093 // as callee.
7094 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
7096 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
7097 llvm::Value *Handle = Callee.getFunctionPointer();
7098 auto *Stub = Builder.CreateLoad(
7099 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
7100 Callee.setFunctionPointer(Stub);
7101 }
7102
7103 // Insert function pointer lookup if this is a target call
7104 //
7105 // This is used for the indirect function case, virtual function case is
7106 // handled in ItaniumCXXABI.cpp
7107 if (getLangOpts().OpenMPIsTargetDevice && CGM.getTriple().isGPU() &&
7108 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
// Walk down member/subscript bases to find the variable the callee pointer
// was loaded from, so we can test for OMPTargetIndirectCallAttr.
7109 const Expr *CalleeExpr = E->getCallee()->IgnoreParenImpCasts();
7110 const DeclRefExpr *DRE = nullptr;
7111 while (CalleeExpr) {
7112 if ((DRE = dyn_cast<DeclRefExpr>(CalleeExpr)))
7113 break;
7114 if (const auto *ME = dyn_cast<MemberExpr>(CalleeExpr))
7115 CalleeExpr = ME->getBase()->IgnoreParenImpCasts();
7116 else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(CalleeExpr))
7117 CalleeExpr = ASE->getBase()->IgnoreParenImpCasts();
7118 else
7119 break;
7120 }
7121
7122 const auto *VD = DRE ? dyn_cast<VarDecl>(DRE->getDecl()) : nullptr;
7123 if (VD && VD->hasAttr<OMPTargetIndirectCallAttr>()) {
7124 auto *FuncPtrTy = llvm::PointerType::get(
7125 CGM.getLLVMContext(), CGM.getDataLayout().getProgramAddressSpace());
7126 llvm::Type *RtlFnArgs[] = {FuncPtrTy};
7127 llvm::FunctionCallee DeviceRtlFn = CGM.CreateRuntimeFunction(
7128 llvm::FunctionType::get(FuncPtrTy, RtlFnArgs, false),
7129 "__llvm_omp_indirect_call_lookup");
7130 llvm::Value *Func = Callee.getFunctionPointer();
7131 llvm::Type *BackupTy = Func->getType();
7132 Func = Builder.CreatePointerBitCastOrAddrSpaceCast(Func, FuncPtrTy);
7133 Func = EmitRuntimeCall(DeviceRtlFn, {Func});
7134 Func = Builder.CreatePointerBitCastOrAddrSpaceCast(Func, BackupTy);
7135 Callee.setFunctionPointer(Func);
7136 }
7137 }
7138
7139 llvm::CallBase *LocalCallOrInvoke = nullptr;
7140 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
7141 E == MustTailCall, E->getExprLoc());
7142
7143 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
7144 if (CalleeDecl->hasAttr<RestrictAttr>() ||
7145 CalleeDecl->hasAttr<MallocSpanAttr>() ||
7146 CalleeDecl->hasAttr<AllocSizeAttr>()) {
7147 // Function has 'malloc' (aka. 'restrict') or 'alloc_size' attribute.
7148 if (SanOpts.has(SanitizerKind::AllocToken)) {
7149 // Set !alloc_token metadata.
7150 EmitAllocToken(LocalCallOrInvoke, E);
7151 }
7152 }
7153 }
7154 if (CallOrInvoke)
7155 *CallOrInvoke = LocalCallOrInvoke;
7156
7157 return Call;
7158}
7159
// Emit .* / ->* on a data member pointer: compute the base address (loaded
// pointer for ->*, lvalue address for .*), evaluate the member-pointer
// offset, and form the member lvalue. NOTE(review): lines 7160-7161
// (signature) and 7175-7176 (part of the in-bounds computation) are elided.
7162 Address BaseAddr = Address::invalid();
7163 if (E->getOpcode() == BO_PtrMemI) {
7164 BaseAddr = EmitPointerWithAlignment(E->getLHS());
7165 } else {
7166 BaseAddr = EmitLValue(E->getLHS()).getAddress();
7167 }
7168
7169 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
7170 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
7171
7172 LValueBaseInfo BaseInfo;
7173 TBAAAccessInfo TBAAInfo;
7174 bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
7177 E, BaseAddr, OffsetV, MPT, IsInBounds, &BaseInfo, &TBAAInfo);
7178
7179 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
7180}
7181
7182/// Given the address of a temporary variable, produce an r-value of
7183/// its type.
// NOTE(review): lines 7184 (signature head) and 7187 (the MakeAddrLValue
// forming `lvalue`) are elided in this excerpt.
7185 QualType type,
7186 SourceLocation loc) {
7188 switch (getEvaluationKind(type)) {
7189 case TEK_Complex:
7190 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
7191 case TEK_Aggregate:
7192 return lvalue.asAggregateRValue();
7193 case TEK_Scalar:
7194 return RValue::get(EmitLoadOfScalar(lvalue, loc));
7195 }
7196 llvm_unreachable("bad evaluation kind");
7197}
7198
7199void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
7200 assert(Val->getType()->isFPOrFPVectorTy());
7201 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
7202 return;
7203
7204 llvm::MDBuilder MDHelper(getLLVMContext());
7205 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
7206
7207 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
7208}
7209
// Apply the OpenCL/HIP relaxed-sqrt accuracy (!fpmath) to a float/half sqrt
// result when correctly-rounded sqrt was not requested.
// NOTE(review): line 7210 (signature) is elided in this excerpt.
7211 llvm::Type *EltTy = Val->getType()->getScalarType();
7212 if (!EltTy->isFloatTy() && !EltTy->isHalfTy())
7213 return;
7214
7215 if ((getLangOpts().OpenCL &&
7216 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
7217 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
7218 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
7219 // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3 ulp.
7220 // OpenCL v3.0 s7.4: minimum accuracy of half precision sqrt is 1.5 ulp.
7221 //
7222 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
7223 // build option allows an application to specify that single precision
7224 // floating-point divide (x/y and 1/x) and sqrt used in the program
7225 // source are correctly rounded.
7226 //
7227 // TODO: CUDA has a prec-sqrt flag
7228 SetFPAccuracy(Val, EltTy->isFloatTy() ? 3.0f : 1.5f);
7229 }
7230}
7231
// Apply the OpenCL/HIP relaxed-division accuracy (!fpmath) to a float/half
// division result when correctly-rounded division was not requested.
// NOTE(review): line 7232 (signature) is elided in this excerpt.
7233 llvm::Type *EltTy = Val->getType()->getScalarType();
7234 if (!EltTy->isFloatTy() && !EltTy->isHalfTy())
7235 return;
7236
7237 if ((getLangOpts().OpenCL &&
7238 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
7239 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
7240 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
7241 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5 ulp.
7242 // OpenCL v3.0 s7.4: minimum accuracy of half precision / is 1 ulp.
7243 //
7244 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
7245 // build option allows an application to specify that single precision
7246 // floating-point divide (x/y and 1/x) and sqrt used in the program
7247 // source are correctly rounded.
7248 //
7249 // TODO: CUDA has a prec-div flag
7250 SetFPAccuracy(Val, EltTy->isFloatTy() ? 2.5f : 1.f);
7251 }
7252}
7253
7254namespace {
7255 struct LValueOrRValue {
7256 LValue LV;
7257 RValue RV;
7258 };
7259}
7260
7261static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
7262 const PseudoObjectExpr *E,
7263 bool forLValue,
7264 AggValueSlot slot) {
7266
7267 // Find the result expression, if any.
7268 const Expr *resultExpr = E->getResultExpr();
7269 LValueOrRValue result;
7270
7272 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
7273 const Expr *semantic = *i;
7274
7275 // If this semantic expression is an opaque value, bind it
7276 // to the result of its source expression.
7277 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
7278 // Skip unique OVEs.
7279 if (ov->isUnique()) {
7280 assert(ov != resultExpr &&
7281 "A unique OVE cannot be used as the result expression");
7282 continue;
7283 }
7284
7285 // If this is the result expression, we may need to evaluate
7286 // directly into the slot.
7288 OVMA opaqueData;
7289 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
7291 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
7292 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
7294 opaqueData = OVMA::bind(CGF, ov, LV);
7295 result.RV = slot.asRValue();
7296
7297 // Otherwise, emit as normal.
7298 } else {
7299 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
7300
7301 // If this is the result, also evaluate the result now.
7302 if (ov == resultExpr) {
7303 if (forLValue)
7304 result.LV = CGF.EmitLValue(ov);
7305 else
7306 result.RV = CGF.EmitAnyExpr(ov, slot);
7307 }
7308 }
7309
7310 opaques.push_back(opaqueData);
7311
7312 // Otherwise, if the expression is the result, evaluate it
7313 // and remember the result.
7314 } else if (semantic == resultExpr) {
7315 if (forLValue)
7316 result.LV = CGF.EmitLValue(semantic);
7317 else
7318 result.RV = CGF.EmitAnyExpr(semantic, slot);
7319
7320 // Otherwise, evaluate the expression in an ignored context.
7321 } else {
7322 CGF.EmitIgnoredExpr(semantic);
7323 }
7324 }
7325
7326 // Unbind all the opaques now.
7327 for (CodeGenFunction::OpaqueValueMappingData &opaque : opaques)
7328 opaque.unbind(CGF);
7329
7330 return result;
7331}
7332
7334 AggValueSlot slot) {
7335 return emitPseudoObjectExpr(*this, E, false, slot).RV;
7336}
7337
7341
// NOTE(review): Doxygen extraction dropped the enclosing function's
// signature line (source line 7342) and several declaration lines
// (7345, 7367, 7371, 7389, 7416, 7434, 7442, 7459); the numeric
// prefixes below are extraction artifacts, not code. From the visible
// body this is presumably the HLSL "flat cast" helper that flattens an
// aggregate LValue into an ordered list of leaf-element LValues
// (AccessList) — confirm the exact name against upstream CGExpr.cpp.
7343 LValue Val, SmallVectorImpl<LValue> &AccessList) {
7344
// Worklist of (base lvalue, type at that position, GEP index chain).
// The leading declaration line is missing; it presumably opened a
// llvm::SmallVector of the tuple type below — TODO confirm.
7346 std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
7347 WorkList;
// All GEP indices are built as i32 constants; the root entry starts at
// index 0 of the aggregate itself.
7348 llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32);
7349 WorkList.push_back({Val, Val.getType(), {llvm::ConstantInt::get(IdxTy, 0)}});
7350
// Iterative DFS: children are pushed in reverse so they pop in source
// order, keeping AccessList in declaration/element order.
7351 while (!WorkList.empty()) {
7352 auto [LVal, T, IdxList] = WorkList.pop_back_val();
7353 T = T.getCanonicalType().getUnqualifiedType();
// Constant arrays: queue one work item per element, extending the
// index chain with the element index.
7354 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) {
7355 uint64_t Size = CAT->getZExtSize();
7356 for (int64_t I = Size - 1; I > -1; I--) {
7357 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
7358 IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
7359 WorkList.emplace_back(LVal, CAT->getElementType(), IdxListCopy);
7360 }
// Records: walk fields (and a possible single base class); unions are
// explicitly unsupported here.
7361 } else if (const auto *RT = dyn_cast<RecordType>(T)) {
7362 const RecordDecl *Record = RT->getDecl()->getDefinitionOrSelf();
7363 assert(!Record->isUnion() && "Union types not supported in flat cast.");
7364
7365 const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);
7366
// Children are first collected into ReverseList (declaration order),
// then reversed and appended so the worklist pops them in order. The
// opening declaration line (7367) is missing — same tuple vector shape
// as WorkList, presumably.
7368 std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
7369 ReverseList;
// Line 7371 is missing; for standard-layout classes it presumably
// redirects Record to the base that actually holds the fields
// (getStandardLayoutBaseWithFields) — TODO confirm.
7370 if (CXXD && CXXD->isStandardLayout())
7372
7373 // deal with potential base classes
7374 if (CXXD && !CXXD->isStandardLayout()) {
7375 if (CXXD->getNumBases() > 0) {
7376 assert(CXXD->getNumBases() == 1 &&
7377 "HLSL doesn't support multiple inheritance.");
7378 auto Base = CXXD->bases_begin();
7379 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
7380 IdxListCopy.push_back(llvm::ConstantInt::get(
7381 IdxTy, 0)); // base struct should be at index zero
7382 ReverseList.emplace_back(LVal, Base->getType(), IdxListCopy);
7383 }
7384 }
7385
7386 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(Record);
7387
// Line 7389 is missing; presumably computes the alignment used by the
// GEP below (e.g. getContext().getTypeAlignInChars(T)) — TODO confirm.
7388 llvm::Type *LLVMT = ConvertTypeForMem(T);
7390 LValue RLValue;
// The record-level GEP is created lazily, only if a named bit-field is
// encountered, since bit-fields need a field lvalue rather than a
// plain index chain.
7391 bool createdGEP = false;
7392 for (auto *FD : Record->fields()) {
7393 if (FD->isBitField()) {
7394 if (FD->isUnnamedBitField())
7395 continue;
7396 if (!createdGEP) {
7397 createdGEP = true;
7398 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
7399 LLVMT, Align, "gep");
7400 RLValue = MakeAddrLValue(GEP, T);
7401 }
// Bit-field leaves carry an empty index chain; the scalar case below
// recognizes that and uses the lvalue directly.
7402 LValue FieldLVal = EmitLValueForField(RLValue, FD, true);
7403 ReverseList.push_back({FieldLVal, FD->getType(), {}});
7404 } else {
7405 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
7406 IdxListCopy.push_back(
7407 llvm::ConstantInt::get(IdxTy, Layout.getLLVMFieldNo(FD)));
7408 ReverseList.emplace_back(LVal, FD->getType(), IdxListCopy);
7409 }
7410 }
7411
7412 std::reverse(ReverseList.begin(), ReverseList.end());
7413 llvm::append_range(WorkList, ReverseList);
// Vectors: emit one vector-element lvalue per lane. Line 7416 (the
// alignment for the GEP) is missing — TODO confirm.
7414 } else if (const auto *VT = dyn_cast<VectorType>(T)) {
7415 llvm::Type *LLVMT = ConvertTypeForMem(T);
7417 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList, LLVMT,
7418 Align, "vector.gep");
7419 LValue Base = MakeAddrLValue(GEP, T);
7420 for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) {
7421 llvm::Constant *Idx = llvm::ConstantInt::get(IdxTy, I);
7422 LValue LV =
7423 LValue::MakeVectorElt(Base.getAddress(), Idx, VT->getElementType(),
7424 Base.getBaseInfo(), TBAAAccessInfo());
7425 AccessList.emplace_back(LV);
7426 }
7427 } else if (const auto *MT = dyn_cast<ConstantMatrixType>(T)) {
7428 // Matrices are represented as flat arrays in memory, but has a vector
7429 // value type. So we use ConvertMatrixAddress to convert the address from
7430 // array to vector, and extract elements similar to the vector case above.
7431 // The matrix elements are iterated over in row-major order regardless of
7432 // the memory layout of the matrix.
// Lines 7434 (alignment) and 7442 (the RHS of the row-major
// comparison, presumably a LangOptions matrix-layout enumerator) are
// missing — TODO confirm against upstream.
7433 llvm::Type *LLVMT = ConvertTypeForMem(T);
7435 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList, LLVMT,
7436 Align, "matrix.gep");
7437 LValue Base = MakeAddrLValue(GEP, T);
7438 Address MatAddr = MaybeConvertMatrixAddress(Base.getAddress(), *this);
7439 unsigned NumRows = MT->getNumRows();
7440 unsigned NumCols = MT->getNumColumns();
7441 bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
7443 llvm::MatrixBuilder MB(Builder);
7444 for (unsigned Row = 0; Row < MT->getNumRows(); Row++) {
7445 for (unsigned Col = 0; Col < MT->getNumColumns(); Col++) {
7446 llvm::Value *RowIdx = llvm::ConstantInt::get(IdxTy, Row);
7447 llvm::Value *ColIdx = llvm::ConstantInt::get(IdxTy, Col);
// MatrixBuilder maps (row, col) to the linear element index for the
// configured memory layout.
7448 llvm::Value *Idx = MB.CreateIndex(RowIdx, ColIdx, NumRows, NumCols,
7449 IsMatrixRowMajor);
7450 LValue LV =
7451 LValue::MakeMatrixElt(MatAddr, Idx, MT->getElementType(),
7452 Base.getBaseInfo(), TBAAAccessInfo());
7453 AccessList.emplace_back(LV);
7454 }
7455 }
7456 } else { // a scalar/builtin type
// Leaf: materialize the GEP for the accumulated index chain. An empty
// chain means this lvalue came from the bit-field path above and is
// already usable as-is. Line 7459 (alignment) is missing.
7457 if (!IdxList.empty()) {
7458 llvm::Type *LLVMT = ConvertTypeForMem(T);
7460 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
7461 LLVMT, Align, "gep");
7462 AccessList.emplace_back(MakeAddrLValue(GEP, T));
7463 } else // must be a bitfield we already created an lvalue for
7464 AccessList.emplace_back(LVal);
7465 }
7466 }
7467}
Defines the clang::ASTContext interface.
#define V(N, I)
This file provides some common utility functions for processing Lambda related AST Constructs.
Defines enum values for all the target-independent builtin functions.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV, bool IsMemberAccess=false)
Definition CGExpr.cpp:3249
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM)
Named Registers are named metadata pointing to the register name which will be read from/written to a...
Definition CGExpr.cpp:3520
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool StrictBool, bool IsBool)
Definition CGExpr.cpp:2067
static llvm::Value * emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc, llvm::Value *Ptr)
Definition CGExpr.cpp:726
static const Expr * isSimpleArrayDecayOperand(const Expr *E)
isSimpleArrayDecayOperand - If the specified expr is a simple decay from an array to pointer,...
Definition CGExpr.cpp:4661
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, int64_t &Offset)
The offset of a field from the beginning of the record.
Definition CGExpr.cpp:4862
static bool hasBPFPreserveStaticOffset(const RecordDecl *D)
Definition CGExpr.cpp:4726
ConstantEmissionKind
Can we constant-emit a load of a reference to a variable of the given type?
Definition CGExpr.cpp:1915
@ CEK_AsReferenceOnly
Definition CGExpr.cpp:1917
@ CEK_AsValueOnly
Definition CGExpr.cpp:1919
@ CEK_None
Definition CGExpr.cpp:1916
@ CEK_AsValueOrReference
Definition CGExpr.cpp:1918
static Address emitRawAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field, bool IsInBounds)
Drill down to the storage of a field without walking into reference types, and without respect for po...
Definition CGExpr.cpp:5690
static bool isConstantEmittableObjectType(QualType type)
Given an object of the given canonical type, can we safely copy a value out of it based on its initia...
Definition CGExpr.cpp:1888
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue)
Definition CGExpr.cpp:3508
static std::optional< LValue > EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand)
Emit the operand of a glvalue conditional operator.
Definition CGExpr.cpp:6001
static CheckRecoverableKind getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal)
Definition CGExpr.cpp:4100
static llvm::Value * emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Type *elemType, llvm::Value *ptr, ArrayRef< llvm::Value * > indices, bool inbounds, bool signedIndices, SourceLocation loc, const llvm::Twine &name="arrayidx")
Definition CGExpr.cpp:4675
SmallVector< llvm::Value *, 8 > RecIndicesTy
Definition CGExpr.cpp:1166
static GlobalDecl getGlobalDeclForDirectCall(const FunctionDecl *FD)
Definition CGExpr.cpp:6565
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, GlobalDecl GD)
Definition CGExpr.cpp:3495
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, CodeGenFunction &CGF, bool IsVector=true)
Definition CGExpr.cpp:2308
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, const PseudoObjectExpr *E, bool forLValue, AggValueSlot slot)
Definition CGExpr.cpp:7261
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, Address &Addr)
Definition CGExpr.cpp:4742
static llvm::StringRef GetUBSanTrapForHandler(SanitizerHandler ID)
Definition CGExpr.cpp:93
static llvm::Value * getArrayIndexingBound(CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel)
If Base is known to point to the start of an array, return the length of that array.
Definition CGExpr.cpp:1018
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, CodeGenFunction &CGF)
Definition CGExpr.cpp:2486
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type)
Definition CGExpr.cpp:1921
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field, bool IsInBounds)
Drill down to the storage of a field without walking into reference types, wrapping the address in an...
Definition CGExpr.cpp:5720
static std::optional< int64_t > getOffsetDifferenceInBits(CodeGenFunction &CGF, const FieldDecl *FD1, const FieldDecl *FD2)
Returns the relative offset difference between FD1 and FD2.
Definition CGExpr.cpp:4893
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD)
Definition CGExpr.cpp:6513
static LValue EmitThreadPrivateVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, llvm::Type *RealVarTy, SourceLocation Loc)
Definition CGExpr.cpp:3346
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, RecIndicesTy &Indices)
Definition CGExpr.cpp:1168
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD)
Definition CGExpr.cpp:6506
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, const Expr *E, const VarDecl *VD)
Definition CGExpr.cpp:3444
static bool hasAnyVptr(const QualType Type, const ASTContext &Context)
Definition CGExpr.cpp:5743
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase)
Given an array base, check whether its member access belongs to a record with preserve_access_index a...
Definition CGExpr.cpp:4755
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T)
Definition CGExpr.cpp:3360
VariableTypeDescriptorKind
Definition CGExpr.cpp:78
@ TK_Float
A floating-point type.
Definition CGExpr.cpp:82
@ TK_Unknown
Any other type. The value representation is unspecified.
Definition CGExpr.cpp:86
@ TK_Integer
An integer type.
Definition CGExpr.cpp:80
@ TK_BitInt
An _BitInt(N) type.
Definition CGExpr.cpp:84
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, bool isInit, CodeGenFunction &CGF)
Definition CGExpr.cpp:2407
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition CGExpr.cpp:1458
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, Address addr, const FieldDecl *field)
Definition CGExpr.cpp:5730
const SanitizerHandlerInfo SanitizerHandlers[]
Definition CGExpr.cpp:4117
static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef< llvm::Value * > FnArgs, SanitizerHandler CheckHandler, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB, bool NoMerge)
Definition CGExpr.cpp:4123
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, LValueBaseInfo &BaseInfo, TBAAAccessInfo &TBAAInfo, QualType BaseTy, QualType ElTy, bool IsLowerBound)
Definition CGExpr.cpp:5232
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
static Address emitAddrOfZeroSizeField(CIRGenFunction &cgf, Address base, const FieldDecl *field)
Get the address of a zero-sized field within a record.
FormatToken * Previous
The previous token in the unwrapped line.
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
Defines the clang::Module class, which describes a module in the source code.
llvm::json::Object Object
static const SanitizerMask AlwaysRecoverable
static const SanitizerMask Unrecoverable
#define LIST_SANITIZER_CHECKS
SanitizerHandler
Defines the SourceManager interface.
static QualType getPointeeType(const MemRegion *R)
a trap message and trap category.
const LValueBase getLValueBase() const
Definition APValue.cpp:1015
bool isLValue() const
Definition APValue.h:490
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:227
SourceManager & getSourceManager()
Definition ASTContext.h:866
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
bool isPFPField(const FieldDecl *Field) const
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
Builtin::Context & BuiltinInfo
Definition ASTContext.h:807
const LangOptions & getLangOpts() const
Definition ASTContext.h:959
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType BoolTy
llvm::DenseMap< const CXXMethodDecl *, CXXCastPath > LambdaCastPaths
For capturing lambdas with an explicit object parameter whose type is derived from the lambda type,...
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getExtVectorType(QualType VectorType, unsigned NumElts) const
Return the unique reference to an extended vector type of the specified element type and size.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
unsigned getTargetAddressSpace(LangAS AS) const
bool isSentinelNullExpr(const Expr *E)
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition Expr.h:7219
Expr * getBase()
Get base of the array section.
Definition Expr.h:7297
Expr * getLength()
Get length of array section.
Definition Expr.h:7307
static QualType getBaseOriginalType(const Expr *Base)
Return original type of the base expression for array section.
Definition Expr.cpp:5392
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:7336
Expr * getLowerBound()
Get lower bound of array section.
Definition Expr.h:7301
bool isOpenACCArraySection() const
Definition Expr.h:7294
SourceLocation getColonLocFirst() const
Definition Expr.h:7328
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2779
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2753
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3781
QualType getElementType() const
Definition TypeBase.h:3793
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
SourceLocation getExprLoc() const
Definition Expr.h:4082
Expr * getRHS() const
Definition Expr.h:4093
static bool isAdditiveOp(Opcode Opc)
Definition Expr.h:4127
Opcode getOpcode() const
Definition Expr.h:4086
A fixed int type of a specified bitwidth.
Definition TypeBase.h:8292
unsigned getNumBits() const
Definition TypeBase.h:8304
bool isPredefinedLibFunction(unsigned ID) const
Determines whether this builtin is a predefined libc/libm function, such as "malloc",...
Definition Builtins.h:321
Represents binding an expression to a temporary.
Definition ExprCXX.h:1497
CXXTemporary * getTemporary()
Definition ExprCXX.h:1515
const Expr * getSubExpr() const
Definition ExprCXX.h:1519
Represents a call to a C++ constructor.
Definition ExprCXX.h:1552
Represents a C++ destructor within a class.
Definition DeclCXX.h:2882
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1372
bool isStandardLayout() const
Determine whether this class is standard-layout per C++ [class]p7.
Definition DeclCXX.h:1225
unsigned getNumBases() const
Retrieves the number of base classes of this class.
Definition DeclCXX.h:602
base_class_iterator bases_begin()
Definition DeclCXX.h:615
bool isDynamicClass() const
Definition DeclCXX.h:574
bool hasDefinition() const
Definition DeclCXX.h:561
const CXXRecordDecl * getStandardLayoutBaseWithFields() const
If this is a standard-layout class or union, any and all data members will be declared in the same ty...
Definition DeclCXX.cpp:562
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:852
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition ExprCXX.h:1072
MSGuidDecl * getGuidDecl() const
Definition ExprCXX.h:1118
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
SourceLocation getBeginLoc() const
Definition Expr.h:3280
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
bool isCoroElideSafe() const
Definition Expr.h:3120
arg_range arguments()
Definition Expr.h:3198
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1608
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
path_iterator path_begin()
Definition Expr.h:3749
CastKind getCastKind() const
Definition Expr.h:3723
bool changesVolatileQualification() const
Return.
Definition Expr.h:3813
path_iterator path_end()
Definition Expr.h:3750
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
llvm::MaybeAlign getAsMaybeAlign() const
getAsMaybeAlign - Returns Quantity as a valid llvm::Align or std::nullopt, Beware llvm::MaybeAlign as...
Definition CharUnits.h:194
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
@ None
Trap Messages are omitted.
@ Detailed
Trap Message includes more context (e.g.
@ Strict
In-memory bool values are assumed to be 0 or 1, and any other value is UB.
bool isOptimizedBuild() const
Are we building at -O1 or higher?
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * getBasePointer() const
Definition Address.h:198
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition Address.h:261
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
bool isValid() const
Definition Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
An aggregate value slot.
Definition CGValue.h:551
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition CGValue.h:619
Address getAddress() const
Definition CGValue.h:691
void setExternallyDestructed(bool destructed=true)
Definition CGValue.h:660
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition CGValue.h:649
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:634
RValue asRValue() const
Definition CGValue.h:713
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition CGBuilder.h:315
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition CGBuilder.h:302
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition CGBuilder.h:341
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition CGBuilder.h:251
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition CGBuilder.h:229
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Definition CGBuilder.h:325
Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition CGBuilder.h:445
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition CGBuilder.h:199
virtual llvm::Function * getKernelStub(llvm::GlobalValue *Handle)=0
Get kernel stub by kernel handle.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
Abstract information about a function or function prototype.
Definition CGCall.h:41
const GlobalDecl getCalleeDecl() const
Definition CGCall.h:59
All available information about a concrete callee.
Definition CGCall.h:63
CGCalleeInfo getAbstractInfo() const
Definition CGCall.h:180
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CGCall.h:172
bool isPseudoDestructor() const
Definition CGCall.h:169
static CGCallee forBuiltin(unsigned builtinID, const FunctionDecl *builtinDecl)
Definition CGCall.h:123
unsigned getBuiltinID() const
Definition CGCall.h:164
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
bool isBuiltin() const
Definition CGCall.h:157
const FunctionDecl * getBuiltinDecl() const
Definition CGCall.h:160
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E)
Definition CGCall.h:131
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition CGDebugInfo.h:59
llvm::DIType * getOrCreateStandaloneType(QualType Ty, SourceLocation Loc)
Emit standalone debug info for a type.
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
llvm::DIType * getOrCreateRecordType(QualType Ty, SourceLocation L)
Emit record type's standalone debug info.
CGFunctionInfo - Class to encapsulate the information about a function definition.
RawAddress createBufferMatrixTempAddress(const LValue &LV, SourceLocation Loc, CodeGenFunction &CGF)
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
bool containsFieldDecl(const FieldDecl *FD) const
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr)
Definition CGCall.h:320
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
An object to manage conditionally-evaluated expressions.
llvm::BasicBlock * getStartingBlock() const
Returns a block which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
Definition CGExpr.cpp:5209
LValue EmitCoawaitLValue(const CoawaitExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2812
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2184
void EmitBoundsCheckImpl(const Expr *ArrayExpr, QualType ArrayBaseType, llvm::Value *IndexVal, QualType IndexType, llvm::Value *BoundsVal, QualType BoundsType, bool Accessed)
Definition CGExpr.cpp:1277
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
Definition CGExpr.cpp:3417
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:591
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
Definition CGExpr.cpp:6798
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
Definition CGExpr.cpp:6106
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3711
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
Definition CGExpr.cpp:1358
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:7232
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue EmitInitListLValue(const InitListExpr *E)
Definition CGExpr.cpp:5988
bool isUnderlyingBasePointerConstantNull(const Expr *E)
Check whether the underlying base pointer is a constant null.
Definition CGExpr.cpp:5533
void EmitARCInitWeak(Address addr, llvm::Value *value)
i8* @objc_initWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2695
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
Definition CGExpr.cpp:4973
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
Definition CGExpr.cpp:6830
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1199
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
const CastExpr * CurCast
If a cast expression is being visited, this holds the current cast's expression.
llvm::Type * ConvertType(QualType T)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
Definition CGExpr.cpp:6811
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
CGCapturedStmtInfo * CapturedStmtInfo
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2360
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
Definition CGClass.cpp:281
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
Definition CGExpr.cpp:3114
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Definition CGExpr.cpp:3883
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
Definition CGExpr.cpp:5961
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
Definition CGExpr.cpp:7184
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:3035
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:4035
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
Definition CGExpr.cpp:6816
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:7210
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
Emit a CallExpr without considering whether it might be a subclass.
Definition CGExpr.cpp:6495
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
Definition CGExpr.cpp:735
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7333
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
Definition CGExpr.cpp:5656
const LangOptions & getLangOpts() const
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
Definition CGExpr.cpp:4403
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:698
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2577
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
Definition CGExpr.cpp:7161
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
Definition CGExpr.cpp:6864
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:388
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
Definition CGExpr.cpp:6088
void EmitCountedByBoundsChecking(const Expr *ArrayExpr, QualType ArrayType, Address ArrayInst, QualType IndexType, llvm::Value *IndexVal, bool Accessed, bool FlexibleArray)
EmitCountedByBoundsChecking - If the array being accessed has a "counted_by" attribute,...
Definition CGExpr.cpp:4923
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
Definition CGDecl.cpp:2299
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
Definition CGDecl.cpp:788
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:766
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
Definition CGExpr.cpp:3426
RValue EmitLoadOfGlobalRegLValue(LValue LV)
Load of global named registers are always calls to intrinsics.
Definition CGExpr.cpp:2749
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2953
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Upcast
Checking the operand of a cast to a base object.
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
Definition CGExpr.cpp:6646
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
Definition CGDecl.cpp:2272
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty, SourceLocation Loc)
Definition CGExpr.cpp:2101
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
Definition CGExpr.cpp:7338
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3925
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
Definition CGExpr.cpp:6344
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
Definition CGExpr.cpp:977
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ Default
! No language constraints on evaluation order.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
Definition CGExpr.cpp:6856
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
Definition CGExpr.cpp:2503
LValue EmitLValueForField(LValue Base, const FieldDecl *Field, bool IsInBounds=true)
Definition CGExpr.cpp:5762
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:181
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, bool IsInBounds, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Emit the address of a field using a member data pointer.
Definition CGClass.cpp:150
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
Definition CGExpr.cpp:6367
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
Definition CGExpr.cpp:740
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6571
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:257
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6445
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2521
LValue EmitMatrixSingleSubscriptExpr(const MatrixSingleSubscriptExpr *E)
Definition CGExpr.cpp:5193
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
Definition CGExpr.cpp:5271
Address GetAddrOfBlockDecl(const VarDecl *var)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
Definition CGExpr.cpp:4365
RawAddress CreateIRTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateIRTempWithoutCast - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:188
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
Definition CGExpr.cpp:7199
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
void EmitBoundsCheck(const Expr *ArrayExpr, const Expr *ArrayExprBase, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1261
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:238
LValue EmitPredefinedLValue(const PredefinedExpr *E)
Definition CGExpr.cpp:3888
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4183
LValue EmitDeclRefLValue(const DeclRefExpr *E)
Definition CGExpr.cpp:3592
LValue EmitStringLiteralLValue(const StringLiteral *E)
Definition CGExpr.cpp:3878
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6398
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:2043
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1357
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
Definition CGExpr.cpp:1646
LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Same as MakeAddrLValue above except that the pointer is known to be unsigned.
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1322
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5936
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2243
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition CGExpr.cpp:158
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6384
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
Definition CGExpr.cpp:2115
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5451
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E)
Definition CGExpr.cpp:6761
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen without...
Definition CGExpr.cpp:230
LValue EmitVAArgExprLValue(const VAArgExpr *E)
Definition CGExpr.cpp:6793
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside presereved access index region.
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
Definition CGExpr.cpp:298
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
LValue EmitStmtExprLValue(const StmtExpr *E)
Definition CGExpr.cpp:6896
llvm::Value * EmitARCLoadWeakRetained(Address addr)
i8* @objc_loadWeakRetained(i8** addr)
Definition CGObjC.cpp:2675
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates a alloca and inserts it into the entry block.
Definition CGExpr.cpp:107
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
Definition CGExpr.cpp:6872
Address EmitAddressOfPFPField(Address RecordPtr, const PFPField &Field)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2773
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4615
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2352
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1614
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
void EmitAllocToken(llvm::CallBase *CB, QualType AllocType)
Emit and set additional metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1336
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
LValue EmitCastLValue(const CastExpr *E)
EmitCastLValue - Casts are never lvalues unless that cast is to a reference type.
Definition CGExpr.cpp:6156
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
Definition CGExpr.cpp:521
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
Definition CGExpr.cpp:3436
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3997
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition CGExpr.cpp:308
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:279
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
Definition CGExpr.cpp:5435
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
Definition CGExpr.cpp:1640
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
std::pair< LValue, LValue > EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty)
Definition CGExpr.cpp:6350
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Definition CGExpr.cpp:6844
llvm::Type * ConvertTypeForMem(QualType T)
LValue EmitCallExprLValue(const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6778
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
Definition CGExpr.cpp:2636
llvm::Value * EmitARCLoadWeak(Address addr)
i8* @objc_loadWeak(i8** addr) Essentially objc_autorelease(objc_loadWeakRetained(addr)).
Definition CGObjC.cpp:2668
LValue EmitLValueForLambdaField(const FieldDecl *Field)
Definition CGExpr.cpp:5650
void markStmtMaybeUsed(const Stmt *S)
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
Definition CGExpr.cpp:6850
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7342
LValue EmitCoyieldLValue(const CoyieldExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond if false.
Definition CGExpr.cpp:4317
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
Emits all the code to cause the given temporary to be cleaned up.
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
Definition CGExpr.cpp:3811
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1597
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1678
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:194
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
Definition CGExpr.cpp:3384
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
Definition CGExpr.cpp:6417
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
Definition CGObjC.cpp:2192
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
Definition CGExpr.cpp:6822
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:748
Address EmitExtVectorElementLValue(LValue V)
Generates lvalue for partial ext_vector access.
Definition CGExpr.cpp:2731
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Definition CGExpr.cpp:338
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
RValue EmitLoadOfExtVectorElementLValue(LValue V)
Definition CGExpr.cpp:2673
static bool hasAggregateEvaluationKind(QualType T)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
Definition CGExpr.cpp:1655
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
Definition CGCall.cpp:4864
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:5185
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4600
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4522
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2277
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1253
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4510
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
Definition CGExpr.cpp:6807
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
generateDestroyHelper - Generates a helper function which, when invoked, destroys the given object.
LValue EmitMemberExpr(const MemberExpr *E)
Definition CGExpr.cpp:5540
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1940
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1713
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Store of global named registers are always calls to intrinsics.
Definition CGExpr.cpp:3224
bool isOpaqueValueEmitted(const OpaqueValueExpr *E)
isOpaqueValueEmitted - Return true if the opaque value expression has already been emitted.
Definition CGExpr.cpp:6411
std::pair< llvm::Value *, CGPointerAuthInfo > EmitOrigPointerRValue(const Expr *E)
Retrieve a pointer rvalue and its ptrauth info.
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2683
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
LValue EmitMatrixElementExpr(const MatrixElementExpr *E)
Definition CGExpr.cpp:2336
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
Definition CGExpr.cpp:720
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
Definition CGExpr.cpp:1607
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:643
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1393
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * performAddrSpaceCast(llvm::Constant *Src, llvm::Type *DestTy)
llvm::Constant * getRawFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return a function pointer for a reference to the given function.
Definition CGExpr.cpp:3483
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition CGCXX.cpp:252
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
llvm::Constant * getFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return the ABI-correct function pointer value for a reference to the given function.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type)
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
A specialization of Address that requires the address to be an LLVM Constant.
Definition Address.h:296
llvm::Constant * getPointer() const
Definition Address.h:308
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
llvm::Constant * tryEmitConstantExpr(const ConstantExpr *CE)
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
AlignmentSource getAlignmentSource() const
Definition CGValue.h:172
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getMatrixRowIdx() const
Definition CGValue.h:412
static LValue MakeMatrixRow(Address Addr, llvm::Value *RowIdx, QualType MatrixTy, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:510
bool isBitField() const
Definition CGValue.h:288
bool isMatrixElt() const
Definition CGValue.h:291
Expr * getBaseIvarExp() const
Definition CGValue.h:344
llvm::Constant * getExtVectorElts() const
Definition CGValue.h:431
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition CGValue.h:500
llvm::Constant * getMatrixRowElts() const
Definition CGValue.h:417
bool isObjCStrong() const
Definition CGValue.h:336
bool isMatrixRowSwizzle() const
Definition CGValue.h:293
bool isGlobalObjCRef() const
Definition CGValue.h:318
bool isVectorElt() const
Definition CGValue.h:287
bool isSimple() const
Definition CGValue.h:286
bool isVolatileQualified() const
Definition CGValue.h:297
RValue asAggregateRValue() const
Definition CGValue.h:545
llvm::Value * getPointer(CodeGenFunction &CGF) const
llvm::Value * getMatrixIdx() const
Definition CGValue.h:407
llvm::Value * getGlobalReg() const
Definition CGValue.h:452
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:454
bool isVolatile() const
Definition CGValue.h:340
const Qualifiers & getQuals() const
Definition CGValue.h:350
bool isGlobalReg() const
Definition CGValue.h:290
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:474
bool isObjCWeak() const
Definition CGValue.h:333
Address getAddress() const
Definition CGValue.h:373
unsigned getVRQualifiers() const
Definition CGValue.h:299
bool isMatrixRow() const
Definition CGValue.h:292
LValue setKnownNonNull()
Definition CGValue.h:362
bool isNonGC() const
Definition CGValue.h:315
bool isExtVectorElt() const
Definition CGValue.h:289
llvm::Value * getVectorIdx() const
Definition CGValue.h:394
void setNontemporal(bool Value)
Definition CGValue.h:331
LValueBaseInfo getBaseInfo() const
Definition CGValue.h:358
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition CGValue.h:327
QualType getType() const
Definition CGValue.h:303
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:446
bool isThreadLocalRef() const
Definition CGValue.h:321
KnownNonNull_t isKnownNonNull() const
Definition CGValue.h:361
TBAAAccessInfo getTBAAInfo() const
Definition CGValue.h:347
void setNonGC(bool Value)
Definition CGValue.h:316
static LValue MakeMatrixRowSwizzle(Address MatAddr, llvm::Value *RowIdx, llvm::Constant *Cols, QualType MatrixTy, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:521
Address getVectorAddress() const
Definition CGValue.h:382
bool isNontemporal() const
Definition CGValue.h:330
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition CGValue.h:490
bool isObjCIvar() const
Definition CGValue.h:309
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:464
void setAddress(Address address)
Definition CGValue.h:375
Address getExtVectorAddress() const
Definition CGValue.h:423
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:535
Address getMatrixAddress() const
Definition CGValue.h:399
Address getBitFieldAddress() const
Definition CGValue.h:437
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:109
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
An abstract representation of an aligned address.
Definition Address.h:42
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition Address.h:93
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:77
llvm::Value * getPointer() const
Definition Address.h:66
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition Address.h:83
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:381
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3334
QualType getElementType() const
Definition TypeBase.h:3344
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
bool isFileScope() const
Definition Expr.h:3640
const Expr * getInitializer() const
Definition Expr.h:3636
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition Expr.h:1085
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4446
unsigned getNumColumns() const
Returns the number of columns in the matrix.
Definition TypeBase.h:4465
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition TypeBase.h:4462
DeclContext * getLexicalParent()
getLexicalParent - Returns the containing lexical DeclContext.
Definition DeclBase.h:2138
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1477
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:493
ValueDecl * getDecl()
Definition Expr.h:1341
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1471
SourceLocation getLocation() const
Definition Expr.h:1349
T * getAttr() const
Definition DeclBase.h:581
SourceLocation getLocation() const
Definition DeclBase.h:447
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition DeclBase.cpp:576
DeclContext * getDeclContext()
Definition DeclBase.h:456
bool hasAttr() const
Definition DeclBase.h:585
const Expr * getBase() const
Definition Expr.h:6581
ExplicitCastExpr - An explicit cast written in the source code.
Definition Expr.h:3931
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:84
bool isGLValue() const
Definition Expr.h:287
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3124
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:447
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3097
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3093
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known ...
bool isPRValue() const
Definition Expr.h:285
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition Expr.h:284
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1551
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3695
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3077
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:282
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:479
QualType getType() const
Definition Expr.h:144
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/ write barrier.
Definition Expr.cpp:3008
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6610
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4443
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4556
ExtVectorType - Extended vector type.
Definition TypeBase.h:4326
Represents a member of a struct/union/class.
Definition Decl.h:3178
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3281
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3263
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3414
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4850
const Expr * getSubExpr() const
Definition Expr.h:1065
Represents a function declaration or definition.
Definition Decl.h:2018
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3757
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5366
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4562
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition Expr.h:7397
const OpaqueValueExpr * getCastedTemporary() const
Definition Expr.h:7448
const OpaqueValueExpr * getOpaqueArgLValue() const
Definition Expr.h:7429
bool isInOut() const
returns true if the parameter is inout and false if the parameter is out.
Definition Expr.h:7456
const Expr * getWritebackCast() const
Definition Expr.h:7443
const Expr * getArgLValue() const
Return the l-value expression that was written as the argument in source.
Definition Expr.h:7438
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, const IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5597
Describes an C or C++ initializer list.
Definition Expr.h:5302
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2469
const Expr * getInit(unsigned Init) const
Definition Expr.h:5357
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4920
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4945
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4970
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4588
MatrixSingleSubscriptExpr - Matrix single subscript expression for the MatrixType extension when you ...
Definition Expr.h:2798
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2838
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition Expr.h:2868
bool isIncomplete() const
Definition Expr.h:2888
QualType getElementType() const
Returns type of the elements being stored in the matrix.
Definition TypeBase.h:4410
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3450
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3591
Expr * getBase() const
Definition Expr.h:3444
bool isArrow() const
Definition Expr.h:3551
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3562
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3712
bool isObjCBOOLType(QualType T) const
Returns true if.
Definition NSAPI.cpp:481
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
ObjCEncodeExpr, used for @encode in Objective-C.
Definition ExprObjC.h:441
Represents an ObjC class declaration.
Definition DeclObjC.h:1154
ObjCIvarDecl - Represents an ObjC instance variable.
Definition DeclObjC.h:1952
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition ExprObjC.h:580
ObjCIvarDecl * getDecl()
Definition ExprObjC.h:610
bool isArrow() const
Definition ExprObjC.h:618
const Expr * getBase() const
Definition ExprObjC.h:614
An expression that sends a message to the given Objective-C object or class.
Definition ExprObjC.h:971
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1395
QualType getReturnType() const
Definition DeclObjC.h:329
ObjCSelectorExpr used for @selector in Objective-C.
Definition ExprObjC.h:486
Selector getSelector() const
Definition ExprObjC.h:500
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
bool isUnique() const
Definition Expr.h:1239
const Expr * getSubExpr() const
Definition Expr.h:2202
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3387
QualType getPointeeType() const
Definition TypeBase.h:3397
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2008
StringRef getIdentKindName() const
Definition Expr.h:2065
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2043
StringLiteral * getFunctionName()
Definition Expr.h:2052
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition Expr.h:6804
semantics_iterator semantics_end()
Definition Expr.h:6869
semantics_iterator semantics_begin()
Definition Expr.h:6865
const Expr *const * const_semantics_iterator
Definition Expr.h:6864
Expr * getResultExpr()
Return the result-bearing expression, or null if there is none.
Definition Expr.h:6852
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8524
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1468
QualType withoutLocalFastQualifiers() const
Definition TypeBase.h:1229
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8566
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8480
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1453
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8625
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition TypeBase.h:8534
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1194
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1560
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition TypeBase.h:1036
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
bool hasConst() const
Definition TypeBase.h:457
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void removeObjCGCAttr()
Definition TypeBase.h:523
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
void removePointerAuth()
Definition TypeBase.h:610
void setAddressSpace(LangAS space)
Definition TypeBase.h:591
bool hasVolatile() const
Definition TypeBase.h:467
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:603
ObjCLifetime getObjCLifetime() const
Definition TypeBase.h:545
Represents a struct/union/class.
Definition Decl.h:4343
field_range fields() const
Definition Decl.h:4546
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition Decl.h:4527
bool isAnonymousStructOrUnion() const
Whether this is an anonymous struct or union.
Definition Decl.h:4395
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition Expr.h:4598
StmtClass getStmtClass() const
Definition Stmt.h:1503
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
bool isUnion() const
Definition Decl.h:3946
Exposes information about the current target.
Definition TargetInfo.h:227
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
virtual StringRef getABI() const
Get the ABI currently in use.
The base class of the type hierarchy.
Definition TypeBase.h:1875
bool isBlockPointerType() const
Definition TypeBase.h:8697
bool isVoidType() const
Definition TypeBase.h:9043
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2289
bool isPackedVectorBoolType(const ASTContext &ctx) const
Definition Type.cpp:455
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition Type.cpp:2000
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9346
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8780
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isArrayType() const
Definition TypeBase.h:8776
bool isFunctionPointerType() const
Definition TypeBase.h:8744
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2422
bool isConstantMatrixType() const
Definition TypeBase.h:8844
bool isPointerType() const
Definition TypeBase.h:8677
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:9087
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9337
bool isReferenceType() const
Definition TypeBase.h:8701
bool isEnumeralType() const
Definition TypeBase.h:8808
bool isVariableArrayType() const
Definition TypeBase.h:8788
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:789
bool isExtVectorBoolType() const
Definition TypeBase.h:8824
bool isBitIntType() const
Definition TypeBase.h:8952
bool isConstantMatrixBoolType() const
Definition TypeBase.h:8830
bool isAnyComplexType() const
Definition TypeBase.h:8812
bool hasPointeeToCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8729
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9223
bool isAtomicType() const
Definition TypeBase.h:8869
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2859
bool isObjectType() const
Determine whether this type is an object type.
Definition TypeBase.h:2567
bool isHLSLResourceRecord() const
Definition Type.cpp:5496
EnumDecl * getAsEnumDecl() const
Retrieves the EnumDecl this type refers to.
Definition Type.h:53
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2527
bool isFunctionType() const
Definition TypeBase.h:8673
bool isObjCObjectPointerType() const
Definition TypeBase.h:8856
bool isVectorType() const
Definition TypeBase.h:8816
bool isAnyPointerType() const
Definition TypeBase.h:8685
bool isSubscriptableVectorType() const
Definition TypeBase.h:8836
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9270
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition Type.cpp:690
bool isRecordType() const
Definition TypeBase.h:8804
bool isHLSLResourceRecordArray() const
Definition Type.cpp:5500
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2444
bool isCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8723
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
SourceLocation getExprLoc() const
Definition Expr.h:2371
Expr * getSubExpr() const
Definition Expr.h:2288
Opcode getOpcode() const
Definition Expr.h:2283
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a variable declaration or definition.
Definition Decl.h:924
TLSKind getTLSKind() const
Definition Decl.cpp:2147
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2345
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1182
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:950
@ TLS_None
Not a TLS variable.
Definition Decl.h:944
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4025
Represents a GCC generic vector type.
Definition TypeBase.h:4234
unsigned getNumElements() const
Definition TypeBase.h:4249
#define INT_MIN
Definition limits.h:55
Definition SPIR.cpp:35
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition CGValue.h:142
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD)
isEmptyFieldForLayout - Return true iff the field is "empty", that is, either a zero-width bit-field ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
@ ARCImpreciseLifetime
Definition CGValue.h:137
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition CGValue.h:160
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
std::optional< llvm::AllocTokenMetadata > getAllocTokenMetadata(QualType T, const ASTContext &Ctx)
Get the information required for construction of an allocation token ID.
QualType inferPossibleType(const CallExpr *E, const ASTContext &Ctx, const CastExpr *CastE)
Infer the possible allocated type from an allocation call expression.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition Specifiers.h:155
@ SC_Register
Definition Specifiers.h:258
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition Specifiers.h:340
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:343
@ SD_Static
Static storage duration.
Definition Specifiers.h:344
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:341
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:342
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:345
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
LangAS
Defines the address space values used by the address space qualifier of QualType.
llvm::cl::opt< bool > ClSanitizeGuardChecks
SmallVector< CXXBaseSpecifier *, 4 > CXXCastPath
A simple array of base specifiers.
Definition ASTContext.h:151
U cast(CodeGen::Address addr)
Definition Address.h:327
LangAS getLangASFromTargetAS(unsigned TargetAS)
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
Definition TypeBase.h:5970
bool isLambdaMethod(const DeclContext *DC)
Definition ASTLambda.h:39
@ Other
Other implicit parameter.
Definition Decl.h:1763
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:178
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:181
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
uint64_t Offset
Offset - The byte offset of the final access within the base one.
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
uint64_t Size
Size - The size of access, in bytes.
llvm::MDNode * BaseType
BaseType - The base/leading access type.
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:615
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition Expr.h:68