CGExpr.cpp
1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGObjCRuntime.h"
21#include "CGOpenMPRuntime.h"
22#include "CGRecordLayout.h"
23#include "CodeGenFunction.h"
24#include "CodeGenModule.h"
25#include "CodeGenPGO.h"
26#include "ConstantEmitter.h"
27#include "TargetInfo.h"
29#include "clang/AST/ASTLambda.h"
30#include "clang/AST/Attr.h"
31#include "clang/AST/DeclObjC.h"
32#include "clang/AST/NSAPI.h"
36#include "clang/Basic/Module.h"
38#include "llvm/ADT/STLExtras.h"
39#include "llvm/ADT/ScopeExit.h"
40#include "llvm/ADT/StringExtras.h"
41#include "llvm/IR/DataLayout.h"
42#include "llvm/IR/Intrinsics.h"
43#include "llvm/IR/LLVMContext.h"
44#include "llvm/IR/MDBuilder.h"
45#include "llvm/IR/MatrixBuilder.h"
46#include "llvm/Support/ConvertUTF.h"
47#include "llvm/Support/Endian.h"
48#include "llvm/Support/MathExtras.h"
49#include "llvm/Support/Path.h"
50#include "llvm/Support/xxhash.h"
51#include "llvm/Transforms/Utils/SanitizerStats.h"
52
53#include <numeric>
54#include <optional>
55#include <string>
56
57using namespace clang;
58using namespace CodeGen;
59
60namespace clang {
61// TODO: consider deprecating ClSanitizeGuardChecks; functionality is subsumed
62// by -fsanitize-skip-hot-cutoff
63llvm::cl::opt<bool> ClSanitizeGuardChecks(
64 "ubsan-guard-checks", llvm::cl::Optional,
65 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
66
67} // namespace clang
68
69//===--------------------------------------------------------------------===//
70// Defines for metadata
71//===--------------------------------------------------------------------===//
72
73// These values must be kept the same as in the ubsan runtime library.
74enum VariableTypeDescriptorKind : uint16_t {
75 /// An integer type.
76 TK_Integer = 0x0000,
77 /// A floating-point type.
78 TK_Float = 0x0001,
79 /// An _BitInt(N) type.
80 TK_BitInt = 0x0002,
81 /// Any other type. The value representation is unspecified.
82 TK_Unknown = 0xffff
83};
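// Illustrative note (the encoding itself lives in the ubsan runtime's
// TypeDescriptor, not in this file): a signed 32-bit 'int' is described as
// TK_Integer with TypeInfo = (log2(32) << 1) | 1 = 11, i.e. the low bit holds
// the signedness and the remaining bits hold log2 of the bit width.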
84
85//===--------------------------------------------------------------------===//
86// Miscellaneous Helper Methods
87//===--------------------------------------------------------------------===//
88
89static llvm::StringRef GetUBSanTrapForHandler(SanitizerHandler ID) {
90 switch (ID) {
91#define SANITIZER_CHECK(Enum, Name, Version, Msg) \
92 case SanitizerHandler::Enum: \
93 return Msg;
94 LIST_SANITIZER_CHECKS
95#undef SANITIZER_CHECK
96 }
97 llvm_unreachable("unhandled switch case");
98}
99
100/// CreateTempAlloca - This creates an alloca and inserts it into the entry
101/// block.
102RawAddress CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
103 CharUnits Align,
104 const Twine &Name,
105 llvm::Value *ArraySize) {
106 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
107 Alloca->setAlignment(Align.getAsAlign());
108 return RawAddress(Alloca, Ty, Align, KnownNonNull);
109}
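// Hypothetical usage sketch (illustration only, not part of this file): a
// caller that wants a raw stack slot left in the alloca address space, without
// the usual cast to the language's default address space, might write:
//   RawAddress Slot =
//       CGF.CreateTempAllocaWithoutCast(CGF.Int8Ty, CharUnits::One(), "slot");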
110
111RawAddress CodeGenFunction::MaybeCastStackAddressSpace(RawAddress Alloca,
112 LangAS DestLangAS,
113 llvm::Value *ArraySize) {
114
115 llvm::Value *V = Alloca.getPointer();
116 // Alloca always returns a pointer in alloca address space, which may
117 // be different from the type defined by the language. For example,
118 // in C++ the auto variables are in the default address space. Therefore
119 // cast alloca to the default address space when necessary.
120
121 unsigned DestAddrSpace = getContext().getTargetAddressSpace(DestLangAS);
122 if (DestAddrSpace != Alloca.getAddressSpace()) {
123 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
124 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
125 // otherwise alloca is inserted at the current insertion point of the
126 // builder.
127 if (!ArraySize)
128 Builder.SetInsertPoint(getPostAllocaInsertPoint());
129 V = getTargetHooks().performAddrSpaceCast(
130 *this, V, getASTAllocaAddressSpace(), Builder.getPtrTy(DestAddrSpace),
131 /*IsNonNull=*/true);
132 }
133
134 return RawAddress(V, Alloca.getElementType(), Alloca.getAlignment(),
135 KnownNonNull);
136}
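// Illustrative note: on targets whose allocas are not in the default address
// space (e.g. AMDGPU, where stack objects live in addrspace(5) while generic
// pointers use addrspace(0)), the cast above turns
//   %tmp = alloca i32, addrspace(5)
// into a value usable as a generic pointer:
//   %tmp.ascast = addrspacecast ptr addrspace(5) %tmp to ptr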
137
138RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, LangAS DestLangAS,
139 CharUnits Align, const Twine &Name,
140 llvm::Value *ArraySize,
141 RawAddress *AllocaAddr) {
142 RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
143 if (AllocaAddr)
144 *AllocaAddr = Alloca;
145 return MaybeCastStackAddressSpace(Alloca, DestLangAS, ArraySize);
146}
147
148/// CreateTempAlloca - This creates an alloca and inserts it into the entry
149/// block if \p ArraySize is nullptr, otherwise inserts it at the current
150/// insertion point of the builder.
151llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
152 const Twine &Name,
153 llvm::Value *ArraySize) {
154 llvm::AllocaInst *Alloca;
155 if (ArraySize)
156 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
157 else
158 Alloca =
159 new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
160 ArraySize, Name, AllocaInsertPt->getIterator());
161 if (SanOpts.Mask & SanitizerKind::Address) {
162 Alloca->addAnnotationMetadata({"alloca_name_altered", Name.str()});
163 }
164 if (Allocas) {
165 Allocas->Add(Alloca);
166 }
167 return Alloca;
168}
169
170/// CreateDefaultAlignTempAlloca - This creates an alloca with the
171/// default alignment of the corresponding LLVM type, which is *not*
172/// guaranteed to be related in any way to the expected alignment of
173/// an AST type that might have been lowered to Ty.
174RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
175 const Twine &Name) {
176 CharUnits Align =
177 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
178 return CreateTempAlloca(Ty, Align, Name);
179}
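// Illustrative example: the preferred alignment used here comes from the LLVM
// data layout, not from the AST. On 32-bit x86, for instance, 'double' has an
// ABI alignment of 4 bytes but a preferred alignment of 8, so this helper must
// not be used where the AST-mandated alignment of the original type matters.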
180
181RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
182 CharUnits Align = getContext().getTypeAlignInChars(Ty);
183 return CreateTempAlloca(ConvertType(Ty), Align, Name);
184}
185
186RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
187 RawAddress *Alloca) {
188 // FIXME: Should we prefer the preferred type alignment here?
189 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
190}
191
192RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
193 const Twine &Name,
194 RawAddress *Alloca) {
195 RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
196 /*ArraySize=*/nullptr, Alloca);
197
198 if (Ty->isConstantMatrixType()) {
199 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
200 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
201 ArrayTy->getNumElements());
202
203 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
205 }
206 return Result;
207}
208
210 CharUnits Align,
211 const Twine &Name) {
212 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
213}
214
216 const Twine &Name) {
217 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
218 Name);
219}
220
221/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
222/// expression and compare the result against zero, returning an Int1Ty value.
223llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
224 PGO->setCurrentStmt(E);
225 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
226 llvm::Value *MemPtr = EmitScalarExpr(E);
227 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
228 }
229
230 QualType BoolTy = getContext().BoolTy;
231 SourceLocation Loc = E->getExprLoc();
232 CGFPOptionsRAII FPOptsRAII(*this, E);
233 if (!E->getType()->isAnyComplexType())
234 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
235
236 return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
237 Loc);
238}
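// Illustrative example: for 'if (mp)' where 'mp' is a pointer to data member,
// the condition is produced by the C++ ABI's member-pointer null test rather
// than a plain comparison against zero, because under the Itanium ABI the null
// data-member pointer is represented as -1, not 0.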
239
240/// EmitIgnoredExpr - Emit code to compute the specified expression,
241/// ignoring the result.
242void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
243 if (E->isPRValue())
244 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
245
246 // if this is a bitfield-resulting conditional operator, we can special case
247 // emit this. The normal 'EmitLValue' version of this is particularly
248 // difficult to codegen for, since creating a single "LValue" for two
249 // different sized arguments here is not particularly doable.
250 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
251 E->IgnoreParens())) {
252 if (CondOp->getObjectKind() == OK_BitField)
253 return EmitIgnoredConditionalOperator(CondOp);
254 }
255
256 // Just emit it as an l-value and drop the result.
257 EmitLValue(E);
258}
259
260/// EmitAnyExpr - Emit code to compute the specified expression which
261/// can have any type. The result is returned as an RValue struct.
262/// If this is an aggregate expression, AggSlot indicates where the
263/// result should be returned.
264RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
265 AggValueSlot aggSlot,
266 bool ignoreResult) {
267 switch (getEvaluationKind(E->getType())) {
268 case TEK_Scalar:
269 return RValue::get(EmitScalarExpr(E, ignoreResult));
270 case TEK_Complex:
271 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
272 case TEK_Aggregate:
273 if (!ignoreResult && aggSlot.isIgnored())
274 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
275 EmitAggExpr(E, aggSlot);
276 return aggSlot.asRValue();
277 }
278 llvm_unreachable("bad evaluation kind");
279}
280
281/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
282/// always be accessible even if no aggregate location is provided.
283RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
284 AggValueSlot AggSlot = AggValueSlot::ignored();
285
286 if (hasAggregateEvaluationKind(E->getType()))
287 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
288 return EmitAnyExpr(E, AggSlot);
289}
290
291/// EmitAnyExprToMem - Evaluate an expression into a given memory
292/// location.
293void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
294 Address Location,
295 Qualifiers Quals,
296 bool IsInit) {
297 // FIXME: This function should take an LValue as an argument.
298 switch (getEvaluationKind(E->getType())) {
299 case TEK_Complex:
300 EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
301 /*isInit*/ false);
302 return;
303
304 case TEK_Aggregate: {
305 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
306 AggValueSlot::IsDestructed_t(IsInit),
307 AggValueSlot::DoesNotNeedGCBarriers,
308 AggValueSlot::IsAliased_t(!IsInit),
309 AggValueSlot::MayOverlap));
310 return;
311 }
312
313 case TEK_Scalar: {
314 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
315 LValue LV = MakeAddrLValue(Location, E->getType());
316 EmitStoreThroughLValue(RV, LV);
317 return;
318 }
319 }
320 llvm_unreachable("bad evaluation kind");
321}
322
324 const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
325 QualType Type = LV.getType();
326 switch (getEvaluationKind(Type)) {
327 case TEK_Complex:
328 EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
329 return;
330 case TEK_Aggregate:
334 AggValueSlot::MayOverlap, IsZeroed));
335 return;
336 case TEK_Scalar:
337 if (LV.isSimple())
338 EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
339 else
341 return;
342 }
343 llvm_unreachable("bad evaluation kind");
344}
345
346static void
348 const Expr *E, Address ReferenceTemporary) {
349 // Objective-C++ ARC:
350 // If we are binding a reference to a temporary that has ownership, we
351 // need to perform retain/release operations on the temporary.
352 //
353 // FIXME: This should be looking at E, not M.
354 if (auto Lifetime = M->getType().getObjCLifetime()) {
355 switch (Lifetime) {
358 // Carry on to normal cleanup handling.
359 break;
360
362 // Nothing to do; cleaned up by an autorelease pool.
363 return;
364
367 switch (StorageDuration Duration = M->getStorageDuration()) {
368 case SD_Static:
369 // Note: we intentionally do not register a cleanup to release
370 // the object on program termination.
371 return;
372
373 case SD_Thread:
374 // FIXME: We should probably register a cleanup in this case.
375 return;
376
377 case SD_Automatic:
381 if (Lifetime == Qualifiers::OCL_Strong) {
382 const ValueDecl *VD = M->getExtendingDecl();
383 bool Precise = isa_and_nonnull<VarDecl>(VD) &&
384 VD->hasAttr<ObjCPreciseLifetimeAttr>();
388 } else {
389 // __weak objects always get EH cleanups; otherwise, exceptions
390 // could cause really nasty crashes instead of mere leaks.
393 }
394 if (Duration == SD_FullExpression)
395 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
396 M->getType(), *Destroy,
398 else
399 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
400 M->getType(),
401 *Destroy, CleanupKind & EHCleanup);
402 return;
403
404 case SD_Dynamic:
405 llvm_unreachable("temporary cannot have dynamic storage duration");
406 }
407 llvm_unreachable("unknown storage duration");
408 }
409 }
410
412 if (DK != QualType::DK_none) {
413 switch (M->getStorageDuration()) {
414 case SD_Static:
415 case SD_Thread: {
416 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
417 if (const auto *ClassDecl =
419 ClassDecl && !ClassDecl->hasTrivialDestructor())
420 // Get the destructor for the reference temporary.
421 ReferenceTemporaryDtor = ClassDecl->getDestructor();
422
423 if (!ReferenceTemporaryDtor)
424 return;
425
426 llvm::FunctionCallee CleanupFn;
427 llvm::Constant *CleanupArg;
428 if (E->getType()->isArrayType()) {
430 ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject,
431 CGF.getLangOpts().Exceptions,
432 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
433 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
434 } else {
435 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
436 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
437 CleanupArg =
438 cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
439 }
441 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
442 } break;
444 CGF.pushDestroy(DK, ReferenceTemporary, E->getType());
445 break;
446 case SD_Automatic:
447 CGF.pushLifetimeExtendedDestroy(DK, ReferenceTemporary, E->getType());
448 break;
449 case SD_Dynamic:
450 llvm_unreachable("temporary cannot have dynamic storage duration");
451 }
452 }
453}
454
457 const Expr *Inner,
458 RawAddress *Alloca = nullptr) {
459 auto &TCG = CGF.getTargetHooks();
460 switch (M->getStorageDuration()) {
462 case SD_Automatic: {
463 // If we have a constant temporary array or record try to promote it into a
464 // constant global under the same rules a normal constant would've been
465 // promoted. This is easier on the optimizer and generally emits fewer
466 // instructions.
467 QualType Ty = Inner->getType();
468 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
469 (Ty->isArrayType() || Ty->isRecordType()) &&
470 Ty.isConstantStorage(CGF.getContext(), true, false))
471 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
472 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
473 auto *GV = new llvm::GlobalVariable(
474 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
475 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
476 llvm::GlobalValue::NotThreadLocal,
478 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
479 GV->setAlignment(alignment.getAsAlign());
480 llvm::Constant *C = GV;
481 if (AS != LangAS::Default)
482 C = TCG.performAddrSpaceCast(
483 CGF.CGM, GV, AS,
484 llvm::PointerType::get(
485 CGF.getLLVMContext(),
487 // FIXME: Should we put the new global into a COMDAT?
488 return RawAddress(C, GV->getValueType(), alignment);
489 }
490 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
491 }
492 case SD_Thread:
493 case SD_Static:
494 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
495
496 case SD_Dynamic:
497 llvm_unreachable("temporary can't have dynamic storage duration");
498 }
499 llvm_unreachable("unknown storage duration");
500}
501
502/// Helper method to check if the underlying ABI is AAPCS
503static bool isAAPCS(const TargetInfo &TargetInfo) {
504 return TargetInfo.getABI().starts_with("aapcs");
505}
506
509 const Expr *E = M->getSubExpr();
510
511 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
512 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
513 "Reference should never be pseudo-strong!");
514
515 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
516 // as that will cause the lifetime adjustment to be lost for ARC
517 auto ownership = M->getType().getObjCLifetime();
518 if (ownership != Qualifiers::OCL_None &&
519 ownership != Qualifiers::OCL_ExplicitNone) {
520 RawAddress Object = createReferenceTemporary(*this, M, E);
521 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
522 llvm::Type *Ty = ConvertTypeForMem(E->getType());
523 Object = Object.withElementType(Ty);
524
525 // createReferenceTemporary will promote the temporary to a global with a
526 // constant initializer if it can. It can only do this to a value of
527 // ARC-manageable type if the value is global and therefore "immune" to
528 // ref-counting operations. Therefore we have no need to emit either a
529 // dynamic initialization or a cleanup and we can just return the address
530 // of the temporary.
531 if (Var->hasInitializer())
532 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
533
534 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
535 }
536 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
538
539 switch (getEvaluationKind(E->getType())) {
540 default: llvm_unreachable("expected scalar or aggregate expression");
541 case TEK_Scalar:
542 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
543 break;
544 case TEK_Aggregate: {
546 E->getType().getQualifiers(),
551 break;
552 }
553 }
554
555 pushTemporaryCleanup(*this, M, E, Object);
556 return RefTempDst;
557 }
558
561 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
562
563 for (const auto &Ignored : CommaLHSs)
564 EmitIgnoredExpr(Ignored);
565
566 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
567 if (opaque->getType()->isRecordType()) {
568 assert(Adjustments.empty());
569 return EmitOpaqueValueLValue(opaque);
570 }
571 }
572
573 // Create and initialize the reference temporary.
574 RawAddress Alloca = Address::invalid();
575 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
576 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
577 Object.getPointer()->stripPointerCasts())) {
578 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
579 Object = Object.withElementType(TemporaryType);
580 // If the temporary is a global and has a constant initializer or is a
581 // constant temporary that we promoted to a global, we may have already
582 // initialized it.
583 if (!Var->hasInitializer()) {
584 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
586 if (RefType.getPointerAuth()) {
587 // Use the qualifier of the reference temporary to sign the pointer.
588 LValue LV = MakeRawAddrLValue(Object.getPointer(), RefType,
589 Object.getAlignment());
590 EmitScalarInit(E, M->getExtendingDecl(), LV, false);
591 } else {
592 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
593 }
594 }
595 } else {
596 switch (M->getStorageDuration()) {
597 case SD_Automatic:
598 if (EmitLifetimeStart(Alloca.getPointer())) {
600 Alloca);
601 }
602 break;
603
604 case SD_FullExpression: {
605 if (!ShouldEmitLifetimeMarkers)
606 break;
607
608 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
609 // marker. Instead, start the lifetime of a conditional temporary earlier
610 // so that it's unconditional. Don't do this with sanitizers which need
611 // more precise lifetime marks. However when inside an "await.suspend"
612 // block, we should always avoid conditional cleanup because it creates
613 // boolean marker that lives across await_suspend, which can destroy coro
614 // frame.
615 ConditionalEvaluation *OldConditional = nullptr;
616 CGBuilderTy::InsertPoint OldIP;
618 ((!SanOpts.has(SanitizerKind::HWAddress) &&
619 !SanOpts.has(SanitizerKind::Memory) &&
620 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
621 inSuspendBlock())) {
622 OldConditional = OutermostConditional;
623 OutermostConditional = nullptr;
624
625 OldIP = Builder.saveIP();
626 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
627 Builder.restoreIP(CGBuilderTy::InsertPoint(
628 Block, llvm::BasicBlock::iterator(Block->back())));
629 }
630
631 if (EmitLifetimeStart(Alloca.getPointer())) {
633 }
634
635 if (OldConditional) {
636 OutermostConditional = OldConditional;
637 Builder.restoreIP(OldIP);
638 }
639 break;
640 }
641
642 default:
643 break;
644 }
645 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
646 }
647 pushTemporaryCleanup(*this, M, E, Object);
648
649 // Perform derived-to-base casts and/or field accesses, to get from the
650 // temporary object we created (and, potentially, for which we extended
651 // the lifetime) to the subobject we're binding the reference to.
652 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
653 switch (Adjustment.Kind) {
655 Object =
656 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
657 Adjustment.DerivedToBase.BasePath->path_begin(),
658 Adjustment.DerivedToBase.BasePath->path_end(),
659 /*NullCheckValue=*/ false, E->getExprLoc());
660 break;
661
664 LV = EmitLValueForField(LV, Adjustment.Field);
665 assert(LV.isSimple() &&
666 "materialized temporary field is not a simple lvalue");
667 Object = LV.getAddress();
668 break;
669 }
670
672 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
674 E, Object, Ptr, Adjustment.Ptr.MPT, /*IsInBounds=*/true);
675 break;
676 }
677 }
678 }
679
680 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
681}
682
683RValue
685 // Emit the expression as an lvalue.
686 LValue LV = EmitLValue(E);
687 assert(LV.isSimple());
688 llvm::Value *Value = LV.getPointer(*this);
689
691 // C++11 [dcl.ref]p5 (as amended by core issue 453):
692 // If a glvalue to which a reference is directly bound designates neither
693 // an existing object or function of an appropriate type nor a region of
694 // storage of suitable size and alignment to contain an object of the
695 // reference's type, the behavior is undefined.
696 QualType Ty = E->getType();
698 }
699
700 return RValue::get(Value);
701}
702
703
704/// getAccessedFieldNo - Given an encoded value and a result number, return the
705/// input field number being accessed.
706unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
707 const llvm::Constant *Elts) {
708 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
709 ->getZExtValue();
710}
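// Worked example (illustrative): for an ext-vector access such as 'v.yw', the
// encoded constant is {1, 3}, so getAccessedFieldNo(0, Elts) returns 1 and
// getAccessedFieldNo(1, Elts) returns 3.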
711
712static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
713 llvm::Value *Ptr) {
714 llvm::Value *A0 =
715 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
716 llvm::Value *A1 =
717 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
718 return Builder.CreateXor(Acc, A1);
719}
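// For clarity, the mix above is equivalent to this scalar computation (a
// splitmix64-style finalizer step):
//   uint64_t A0 = Ptr * 0xbf58476d1ce4e5b9u;
//   uint64_t A1 = A0 ^ (A0 >> 31);
//   return Acc ^ A1;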
720
725
728 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
729 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
732}
733
734bool CodeGenFunction::sanitizePerformTypeCheck() const {
735 return SanOpts.has(SanitizerKind::Null) ||
736 SanOpts.has(SanitizerKind::Alignment) ||
737 SanOpts.has(SanitizerKind::ObjectSize) ||
738 SanOpts.has(SanitizerKind::Vptr);
739}
740
741void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
742 llvm::Value *Ptr, QualType Ty,
743 CharUnits Alignment,
744 SanitizerSet SkippedChecks,
745 llvm::Value *ArraySize) {
746 if (!sanitizePerformTypeCheck())
747 return;
748
749 // Don't check pointers outside the default address space. The null check
750 // isn't correct, the object-size check isn't supported by LLVM, and we can't
751 // communicate the addresses to the runtime handler for the vptr check.
752 if (Ptr->getType()->getPointerAddressSpace())
753 return;
754
755 // Don't check pointers to volatile data. The behavior here is implementation-
756 // defined.
757 if (Ty.isVolatileQualified())
758 return;
759
760 // Quickly determine whether we have a pointer to an alloca. It's possible
761 // to skip null checks, and some alignment checks, for these pointers. This
762 // can reduce compile-time significantly.
763 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
764
765 llvm::Value *IsNonNull = nullptr;
766 bool IsGuaranteedNonNull =
767 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
768
769 llvm::BasicBlock *Done = nullptr;
770 bool DoneViaNullSanitize = false;
771
772 {
773 auto CheckHandler = SanitizerHandler::TypeMismatch;
774 SanitizerDebugLocation SanScope(this,
775 {SanitizerKind::SO_Null,
776 SanitizerKind::SO_ObjectSize,
777 SanitizerKind::SO_Alignment},
778 CheckHandler);
779
781 Checks;
782
783 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
784 bool AllowNullPointers = isNullPointerAllowed(TCK);
785 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
786 !IsGuaranteedNonNull) {
787 // The glvalue must not be an empty glvalue.
788 IsNonNull = Builder.CreateIsNotNull(Ptr);
789
790 // The IR builder can constant-fold the null check if the pointer points
791 // to a constant.
792 IsGuaranteedNonNull = IsNonNull == True;
793
794 // Skip the null check if the pointer is known to be non-null.
795 if (!IsGuaranteedNonNull) {
796 if (AllowNullPointers) {
797 // When performing pointer casts, it's OK if the value is null.
798 // Skip the remaining checks in that case.
799 Done = createBasicBlock("null");
800 DoneViaNullSanitize = true;
801 llvm::BasicBlock *Rest = createBasicBlock("not.null");
802 Builder.CreateCondBr(IsNonNull, Rest, Done);
803 EmitBlock(Rest);
804 } else {
805 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
806 }
807 }
808 }
809
810 if (SanOpts.has(SanitizerKind::ObjectSize) &&
811 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
812 !Ty->isIncompleteType()) {
813 uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
814 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
815 if (ArraySize)
816 Size = Builder.CreateMul(Size, ArraySize);
817
818 // Degenerate case: new X[0] does not need an objectsize check.
819 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
820 if (!ConstantSize || !ConstantSize->isNullValue()) {
821 // The glvalue must refer to a large enough storage region.
822 // FIXME: If Address Sanitizer is enabled, insert dynamic
823 // instrumentation
824 // to check this.
825 // FIXME: Get object address space
826 llvm::Type *Tys[2] = {IntPtrTy, Int8PtrTy};
827 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
828 llvm::Value *Min = Builder.getFalse();
829 llvm::Value *NullIsUnknown = Builder.getFalse();
830 llvm::Value *Dynamic = Builder.getFalse();
831 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
832 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
833 Checks.push_back(
834 std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
835 }
836 }
837
838 llvm::MaybeAlign AlignVal;
839 llvm::Value *PtrAsInt = nullptr;
840
841 if (SanOpts.has(SanitizerKind::Alignment) &&
842 !SkippedChecks.has(SanitizerKind::Alignment)) {
843 AlignVal = Alignment.getAsMaybeAlign();
844 if (!Ty->isIncompleteType() && !AlignVal)
845 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
846 /*ForPointeeType=*/true)
847 .getAsMaybeAlign();
848
849 // The glvalue must be suitably aligned.
850 if (AlignVal && *AlignVal > llvm::Align(1) &&
851 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
852 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
853 llvm::Value *Align = Builder.CreateAnd(
854 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
855 llvm::Value *Aligned =
856 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
857 if (Aligned != True)
858 Checks.push_back(
859 std::make_pair(Aligned, SanitizerKind::SO_Alignment));
860 }
861 }
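// Worked example (illustrative): for an 8-byte alignment requirement the
// check emitted above is equivalent to ((uintptr_t)Ptr & 7) == 0, i.e. mask
// with AlignVal - 1 and require the low bits to be zero.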
862
863 if (Checks.size() > 0) {
864 llvm::Constant *StaticData[] = {
866 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
867 llvm::ConstantInt::get(Int8Ty, TCK)};
868 EmitCheck(Checks, CheckHandler, StaticData, PtrAsInt ? PtrAsInt : Ptr);
869 }
870 }
871
872 // If possible, check that the vptr indicates that there is a subobject of
873 // type Ty at offset zero within this object.
874 //
875 // C++11 [basic.life]p5,6:
876 // [For storage which does not refer to an object within its lifetime]
877 // The program has undefined behavior if:
878 // -- the [pointer or glvalue] is used to access a non-static data member
879 // or call a non-static member function
880 if (SanOpts.has(SanitizerKind::Vptr) &&
881 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
882 SanitizerDebugLocation SanScope(this, {SanitizerKind::SO_Vptr},
883 SanitizerHandler::DynamicTypeCacheMiss);
884
885 // Ensure that the pointer is non-null before loading it. If there is no
886 // compile-time guarantee, reuse the run-time null check or emit a new one.
887 if (!IsGuaranteedNonNull) {
888 if (!IsNonNull)
889 IsNonNull = Builder.CreateIsNotNull(Ptr);
890 if (!Done)
891 Done = createBasicBlock("vptr.null");
892 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
893 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
894 EmitBlock(VptrNotNull);
895 }
896
897 // Compute a deterministic hash of the mangled name of the type.
898 SmallString<64> MangledName;
899 llvm::raw_svector_ostream Out(MangledName);
900 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
901 Out);
902
903 // Contained in NoSanitizeList based on the mangled type.
904 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
905 Out.str())) {
906 // Load the vptr, and mix it with TypeHash.
907 llvm::Value *TypeHash =
908 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
909
910 llvm::Type *VPtrTy = llvm::PointerType::get(getLLVMContext(), 0);
911 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
912 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
913 Ty->getAsCXXRecordDecl(),
915 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
916
917 llvm::Value *Hash =
918 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
919 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
920
921 // Look the hash up in our cache.
922 const int CacheSize = 128;
923 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
924 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
925 "__ubsan_vptr_type_cache");
926 llvm::Value *Slot = Builder.CreateAnd(Hash,
927 llvm::ConstantInt::get(IntPtrTy,
928 CacheSize-1));
929 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
930 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
931 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
933
934 // If the hash isn't in the cache, call a runtime handler to perform the
935 // hard work of checking whether the vptr is for an object of the right
936 // type. This will either fill in the cache and return, or produce a
937 // diagnostic.
938 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
939 llvm::Constant *StaticData[] = {
942 CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
943 llvm::ConstantInt::get(Int8Ty, TCK)
944 };
945 llvm::Value *DynamicData[] = { Ptr, Hash };
946 EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
947 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
948 DynamicData);
949 }
950 }
951
952 if (Done) {
953 SanitizerDebugLocation SanScope(
954 this,
955 {DoneViaNullSanitize ? SanitizerKind::SO_Null : SanitizerKind::SO_Vptr},
956 DoneViaNullSanitize ? SanitizerHandler::TypeMismatch
957 : SanitizerHandler::DynamicTypeCacheMiss);
958 Builder.CreateBr(Done);
959 EmitBlock(Done);
960 }
961}
962
963llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
964 QualType EltTy) {
965 ASTContext &C = getContext();
966 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
967 if (!EltSize)
968 return nullptr;
969
970 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
971 if (!ArrayDeclRef)
972 return nullptr;
973
974 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
975 if (!ParamDecl)
976 return nullptr;
977
978 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
979 if (!POSAttr)
980 return nullptr;
981
982 // Don't load the size if it's a lower bound.
983 int POSType = POSAttr->getType();
984 if (POSType != 0 && POSType != 1)
985 return nullptr;
986
987 // Find the implicit size parameter.
988 auto PassedSizeIt = SizeArguments.find(ParamDecl);
989 if (PassedSizeIt == SizeArguments.end())
990 return nullptr;
991
992 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
993 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
994 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
995 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
996 C.getSizeType(), E->getExprLoc());
997 llvm::Value *SizeOfElement =
998 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
999 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
1000}
1001
1002/// If Base is known to point to the start of an array, return the length of
1003/// that array. Return 0 if the length cannot be determined.
1004static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
1005 const Expr *Base,
1006 QualType &IndexedType,
1007 LangOptions::StrictFlexArraysLevelKind
1008 StrictFlexArraysLevel) {
1009 // For the vector indexing extension, the bound is the number of elements.
1010 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
1011 IndexedType = Base->getType();
1012 return CGF.Builder.getInt32(VT->getNumElements());
1013 }
1014
1015 Base = Base->IgnoreParens();
1016
1017 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1018 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
1019 !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
1020 StrictFlexArraysLevel)) {
1021 CodeGenFunction::SanitizerScope SanScope(&CGF);
1022
1023 IndexedType = CE->getSubExpr()->getType();
1024 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
1025 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
1026 return CGF.Builder.getInt(CAT->getSize());
1027
1028 if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
1029 return CGF.getVLASize(VAT).NumElts;
1030 // Ignore pass_object_size here. It's not applicable on decayed pointers.
1031 }
1032 }
1033
1034 CodeGenFunction::SanitizerScope SanScope(&CGF);
1035
1036 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
1037 if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
1038 IndexedType = Base->getType();
1039 return POS;
1040 }
1041
1042 return nullptr;
1043}
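// Illustrative examples of the bounds this returns: for 'int a[10]; a[i]' the
// decayed base yields a constant bound of 10; for a variable-length array it
// is the VLA element count; and for a pointer parameter annotated with
// __attribute__((pass_object_size(0))) it is the implicit size argument
// divided by the element size.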
1044
1045namespace {
1046
1047/// \p StructAccessBase returns the base \p Expr of a field access. It returns
1048/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
1049///
1050/// p in p->a.b.c
1051///
1052/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
1053/// looking for:
1054///
1055/// struct s {
1056/// struct s *ptr;
1057/// int count;
1058/// char array[] __attribute__((counted_by(count)));
1059/// };
1060///
1061/// If we have an expression like \p p->ptr->array[index], we want the
1062/// \p MemberExpr for \p p->ptr instead of \p p.
1063class StructAccessBase
1064 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
1065 const RecordDecl *ExpectedRD;
1066
1067 bool IsExpectedRecordDecl(const Expr *E) const {
1068 QualType Ty = E->getType();
1069 if (Ty->isPointerType())
1070 Ty = Ty->getPointeeType();
1071 return ExpectedRD == Ty->getAsRecordDecl();
1072 }
1073
1074public:
1075 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
1076
1077 //===--------------------------------------------------------------------===//
1078 // Visitor Methods
1079 //===--------------------------------------------------------------------===//
1080
1081 // NOTE: If we build C++ support for counted_by, then we'll have to handle
1082 // horrors like this:
1083 //
1084 // struct S {
1085 // int x, y;
1086 // int blah[] __attribute__((counted_by(x)));
1087 // } s;
1088 //
1089 // int foo(int index, int val) {
1090 // int (S::*IHatePMDs)[] = &S::blah;
1091 // (s.*IHatePMDs)[index] = val;
1092 // }
1093
1094 const Expr *Visit(const Expr *E) {
1095 return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
1096 }
1097
1098 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1099
1100 // These are the types we expect to return (in order of most to least
1101 // likely):
1102 //
1103 // 1. DeclRefExpr - This is the expression for the base of the structure.
1104 // It's exactly what we want to build an access to the \p counted_by
1105 // field.
1106 // 2. MemberExpr - This is the expression that has the same \p RecordDecl
1107// as the flexible array member's lexical enclosing \p RecordDecl. This
1108 // allows us to catch things like: "p->p->array"
1109 // 3. CompoundLiteralExpr - This is for people who create something
1110 // heretical like (struct foo has a flexible array member):
1111 //
1112 // (struct foo){ 1, 2 }.blah[idx];
1113 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1114 return IsExpectedRecordDecl(E) ? E : nullptr;
1115 }
1116 const Expr *VisitMemberExpr(const MemberExpr *E) {
1117 if (IsExpectedRecordDecl(E) && E->isArrow())
1118 return E;
1119 const Expr *Res = Visit(E->getBase());
1120 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1121 }
1122 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1123 return IsExpectedRecordDecl(E) ? E : nullptr;
1124 }
1125 const Expr *VisitCallExpr(const CallExpr *E) {
1126 return IsExpectedRecordDecl(E) ? E : nullptr;
1127 }
1128
1129 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1130 if (IsExpectedRecordDecl(E))
1131 return E;
1132 return Visit(E->getBase());
1133 }
1134 const Expr *VisitCastExpr(const CastExpr *E) {
1135 if (E->getCastKind() == CK_LValueToRValue)
1136 return IsExpectedRecordDecl(E) ? E : nullptr;
1137 return Visit(E->getSubExpr());
1138 }
1139 const Expr *VisitParenExpr(const ParenExpr *E) {
1140 return Visit(E->getSubExpr());
1141 }
1142 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1143 return Visit(E->getSubExpr());
1144 }
1145 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1146 return Visit(E->getSubExpr());
1147 }
1148};
1149
1150} // end anonymous namespace
1151
1152using RecIndicesTy = SmallVector<llvm::Value *, 8>;
1153
1154static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
1155 const FieldDecl *Field,
1156 RecIndicesTy &Indices) {
1157 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1158 int64_t FieldNo = -1;
1159 for (const FieldDecl *FD : RD->fields()) {
1160 if (!Layout.containsFieldDecl(FD))
1161 // This could happen if the field has a struct type that's empty. I don't
1162 // know why either.
1163 continue;
1164
1165 FieldNo = Layout.getLLVMFieldNo(FD);
1166 if (FD == Field) {
1167 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1168 return true;
1169 }
1170
1171 QualType Ty = FD->getType();
1172 if (Ty->isRecordType()) {
1173 if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
1174 if (RD->isUnion())
1175 FieldNo = 0;
1176 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1177 return true;
1178 }
1179 }
1180 }
1181
1182 return false;
1183}
1184
1185llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
1186 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1187 const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
1188
1189 // Find the base struct expr (i.e. p in p->a.b.c.d).
1190 const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1191 if (!StructBase || StructBase->HasSideEffects(getContext()))
1192 return nullptr;
1193
1194 llvm::Value *Res = nullptr;
1195 if (StructBase->getType()->isPointerType()) {
1196 LValueBaseInfo BaseInfo;
1197 TBAAAccessInfo TBAAInfo;
1198 Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1199 Res = Addr.emitRawPointer(*this);
1200 } else if (StructBase->isLValue()) {
1201 LValue LV = EmitLValue(StructBase);
1202 Address Addr = LV.getAddress();
1203 Res = Addr.emitRawPointer(*this);
1204 } else {
1205 return nullptr;
1206 }
1207
1208 RecIndicesTy Indices;
1209 getGEPIndicesToField(*this, RD, CountDecl, Indices);
1210 if (Indices.empty())
1211 return nullptr;
1212
1213 Indices.push_back(Builder.getInt32(0));
1214 CanQualType T = CGM.getContext().getCanonicalTagType(RD);
1215 return Builder.CreateInBoundsGEP(ConvertType(T), Res,
1216 RecIndicesTy(llvm::reverse(Indices)),
1217 "counted_by.gep");
1218}
1219
1220/// This method is typically called in contexts where we can't generate
1221/// side-effects, like in __builtin_dynamic_object_size. When finding
1222/// expressions, only choose those that have either already been emitted or can
1223/// be loaded without side-effects.
1224///
1225/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1226/// within the top-level struct.
1227/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1228llvm::Value *CodeGenFunction::EmitLoadOfCountedByField(
1229 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1230 if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
1231 return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
1232 getIntAlign(), "counted_by.load");
1233 return nullptr;
1234}
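// Hypothetical example: given
//   struct s { int count; char array[] __attribute__((counted_by(count))); };
// a query like __builtin_dynamic_object_size(p->array, 0) can use this helper
// to load 'p->count' through the GEP built above, without re-evaluating 'p' or
// introducing side effects.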
1235
1236void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1237 llvm::Value *Index, QualType IndexType,
1238 bool Accessed) {
1239 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1240 "should not be called unless adding bounds checks");
1241 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1242 getLangOpts().getStrictFlexArraysLevel();
1243 QualType IndexedType;
1244 llvm::Value *Bound =
1245 getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1246
1247 EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1248}
1249
1250void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
1251 llvm::Value *Index,
1252 QualType IndexType,
1253 QualType IndexedType, bool Accessed) {
1254 if (!Bound)
1255 return;
1256
1257 auto CheckKind = SanitizerKind::SO_ArrayBounds;
1258 auto CheckHandler = SanitizerHandler::OutOfBounds;
1259 SanitizerDebugLocation SanScope(this, {CheckKind}, CheckHandler);
1260
1261 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1262 llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1263 llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1264
1265 llvm::Constant *StaticData[] = {
1267 EmitCheckTypeDescriptor(IndexedType),
1268 EmitCheckTypeDescriptor(IndexType)
1269 };
1270 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1271 : Builder.CreateICmpULE(IndexVal, BoundVal);
1272 EmitCheck(std::make_pair(Check, CheckKind), CheckHandler, StaticData, Index);
1273}
1274
1277 bool isInc, bool isPre) {
1278 ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1279
1280 llvm::Value *NextVal;
1281 if (isa<llvm::IntegerType>(InVal.first->getType())) {
1282 uint64_t AmountVal = isInc ? 1 : -1;
1283 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1284
1285 // Add the inc/dec to the real part.
1286 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1287 } else {
1288 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1289 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1290 if (!isInc)
1291 FVal.changeSign();
1292 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1293
1294 // Add the inc/dec to the real part.
1295 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1296 }
1297
1298 ComplexPairTy IncVal(NextVal, InVal.second);
1299
1300 // Store the updated result through the lvalue.
1301 EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1302 if (getLangOpts().OpenMP)
1303 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1304 E->getSubExpr());
1305
1306 // If this is a postinc, return the value read from memory, otherwise use the
1307 // updated value.
1308 return isPre ? IncVal : InVal;
1309}
1310
1311void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
1312 CodeGenFunction *CGF) {
1313 // Bind VLAs in the cast type.
1314 if (CGF && E->getType()->isVariablyModifiedType())
1315 CGF->EmitVariablyModifiedType(E->getType());
1316
1317 if (CGDebugInfo *DI = getModuleDebugInfo())
1318 DI->EmitExplicitCastType(E->getType());
1319}
1320
1321//===----------------------------------------------------------------------===//
1322// LValue Expression Emission
1323//===----------------------------------------------------------------------===//
1324
1325static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx,
1326 CharUnits eltSize) {
1327 // If we have a constant index, we can use the exact offset of the
1328 // element we're accessing.
1329 if (auto *constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
1330 CharUnits offset = constantIdx->getZExtValue() * eltSize;
1331 return arrayAlign.alignmentAtOffset(offset);
1332 }
1333
1334 // Otherwise, use the worst-case alignment for any element.
1335 return arrayAlign.alignmentOfArrayElement(eltSize);
1336}
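// Worked example (illustrative): for a 16-byte aligned array of 4-byte
// elements, a constant index of 3 gives alignmentAtOffset(12) == 4, while an
// unknown index falls back to alignmentOfArrayElement(4) == 4; with 8-byte
// elements an unknown index would still guarantee 8-byte alignment.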
1337
1338/// Emit pointer + index arithmetic.
1340 const BinaryOperator *BO,
1341 LValueBaseInfo *BaseInfo,
1342 TBAAAccessInfo *TBAAInfo,
1343 KnownNonNull_t IsKnownNonNull) {
1344 assert(BO->isAdditiveOp() && "Expect an addition or subtraction.");
1345 Expr *pointerOperand = BO->getLHS();
1346 Expr *indexOperand = BO->getRHS();
1347 bool isSubtraction = BO->getOpcode() == BO_Sub;
1348
1349 Address BaseAddr = Address::invalid();
1350 llvm::Value *index = nullptr;
1351 // In a subtraction, the LHS is always the pointer.
1352 // Note: do not change the evaluation order.
1353 if (!isSubtraction && !pointerOperand->getType()->isAnyPointerType()) {
1354 std::swap(pointerOperand, indexOperand);
1355 index = CGF.EmitScalarExpr(indexOperand);
1356 BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
1358 } else {
1359 BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
1361 index = CGF.EmitScalarExpr(indexOperand);
1362 }
1363
1364 llvm::Value *pointer = BaseAddr.getBasePointer();
1365 llvm::Value *Res = CGF.EmitPointerArithmetic(
1366 BO, pointerOperand, pointer, indexOperand, index, isSubtraction);
1367 QualType PointeeTy = BO->getType()->getPointeeType();
1368 CharUnits Align =
1370 CGF.getContext().getTypeSizeInChars(PointeeTy));
1371 return Address(Res, CGF.ConvertTypeForMem(PointeeTy), Align,
1373 /*Offset=*/nullptr, IsKnownNonNull);
1374}
1375
1377 TBAAAccessInfo *TBAAInfo,
1378 KnownNonNull_t IsKnownNonNull,
1379 CodeGenFunction &CGF) {
1380 // We allow this with ObjC object pointers because of fragile ABIs.
1381 assert(E->getType()->isPointerType() ||
1383 E = E->IgnoreParens();
1384
1385 // Casts:
1386 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1387 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1388 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1389
1390 switch (CE->getCastKind()) {
1391 // Non-converting casts (but not C's implicit conversion from void*).
1392 case CK_BitCast:
1393 case CK_NoOp:
1394 case CK_AddressSpaceConversion:
1395 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1396 if (PtrTy->getPointeeType()->isVoidType())
1397 break;
1398
1399 LValueBaseInfo InnerBaseInfo;
1400 TBAAAccessInfo InnerTBAAInfo;
1402 CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1403 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1404 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1405
1406 if (isa<ExplicitCastExpr>(CE)) {
1407 LValueBaseInfo TargetTypeBaseInfo;
1408 TBAAAccessInfo TargetTypeTBAAInfo;
1410 E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1411 if (TBAAInfo)
1412 *TBAAInfo =
1413 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1414 // If the source l-value is opaque, honor the alignment of the
1415 // casted-to type.
1416 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1417 if (BaseInfo)
1418 BaseInfo->mergeForCast(TargetTypeBaseInfo);
1419 Addr.setAlignment(Align);
1420 }
1421 }
1422
1423 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1424 CE->getCastKind() == CK_BitCast) {
1425 if (auto PT = E->getType()->getAs<PointerType>())
1426 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1427 /*MayBeNull=*/true,
1429 CE->getBeginLoc());
1430 }
1431
1432 llvm::Type *ElemTy =
1434 Addr = Addr.withElementType(ElemTy);
1435 if (CE->getCastKind() == CK_AddressSpaceConversion)
1437 Addr, CGF.ConvertType(E->getType()), ElemTy);
1438
1439 return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
1440 CE->getType());
1441 }
1442 break;
1443
1444 // Array-to-pointer decay.
1445 case CK_ArrayToPointerDecay:
1446 return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1447
1448 // Derived-to-base conversions.
1449 case CK_UncheckedDerivedToBase:
1450 case CK_DerivedToBase: {
1451 // TODO: Support accesses to members of base classes in TBAA. For now, we
1452 // conservatively pretend that the complete object is of the base class
1453 // type.
1454 if (TBAAInfo)
1455 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1457 CE->getSubExpr(), BaseInfo, nullptr,
1458 (KnownNonNull_t)(IsKnownNonNull ||
1459 CE->getCastKind() == CK_UncheckedDerivedToBase));
1460 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1461 return CGF.GetAddressOfBaseClass(
1462 Addr, Derived, CE->path_begin(), CE->path_end(),
1463 CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1464 }
1465
1466 // TODO: Is there any reason to treat base-to-derived conversions
1467 // specially?
1468 default:
1469 break;
1470 }
1471 }
1472
1473 // Unary &.
1474 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1475 if (UO->getOpcode() == UO_AddrOf) {
1476 LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1477 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1478 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1479 return LV.getAddress();
1480 }
1481 }
1482
1483 // std::addressof and variants.
1484 if (auto *Call = dyn_cast<CallExpr>(E)) {
1485 switch (Call->getBuiltinCallee()) {
1486 default:
1487 break;
1488 case Builtin::BIaddressof:
1489 case Builtin::BI__addressof:
1490 case Builtin::BI__builtin_addressof: {
1491 LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1492 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1493 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1494 return LV.getAddress();
1495 }
1496 }
1497 }
1498
1499 // Pointer arithmetic: pointer +/- index.
1500 if (auto *BO = dyn_cast<BinaryOperator>(E)) {
1501 if (BO->isAdditiveOp())
1502 return emitPointerArithmetic(CGF, BO, BaseInfo, TBAAInfo, IsKnownNonNull);
1503 }
1504
1505 // TODO: conditional operators, comma.
1506
1507 // Otherwise, use the alignment of the type.
1510 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1511}
1512
1513/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1514/// derive a more accurate bound on the alignment of the pointer.
1516 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1517 KnownNonNull_t IsKnownNonNull) {
1518 Address Addr =
1519 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1520 if (IsKnownNonNull && !Addr.isKnownNonNull())
1521 Addr.setKnownNonNull();
1522 return Addr;
1523}
1524
1525llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
1526 llvm::Value *V = RV.getScalarVal();
1527 if (auto MPT = T->getAs<MemberPointerType>())
1528 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1529 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1530}
1531
1532RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
1533 if (Ty->isVoidType())
1534 return RValue::get(nullptr);
1535
1536 switch (getEvaluationKind(Ty)) {
1537 case TEK_Complex: {
1538 llvm::Type *EltTy =
1540 llvm::Value *U = llvm::UndefValue::get(EltTy);
1541 return RValue::getComplex(std::make_pair(U, U));
1542 }
1543
1544 // If this is a use of an undefined aggregate type, the aggregate must have an
1545 // identifiable address. Just because the contents of the value are undefined
1546 // doesn't mean that the address can't be taken and compared.
1547 case TEK_Aggregate: {
1548 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1549 return RValue::getAggregate(DestPtr);
1550 }
1551
1552 case TEK_Scalar:
1553 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1554 }
1555 llvm_unreachable("bad evaluation kind");
1556}
1557
1558RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
1559 const char *Name) {
1560 ErrorUnsupported(E, Name);
1561 return GetUndefRValue(E->getType());
1562}
1563
1564LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
1565 const char *Name) {
1566 ErrorUnsupported(E, Name);
1567 llvm::Type *ElTy = ConvertType(E->getType());
1568 llvm::Type *Ty = UnqualPtrTy;
1569 return MakeAddrLValue(
1570 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1571}
1572
1573bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1574 const Expr *Base = Obj;
1575 while (!isa<CXXThisExpr>(Base)) {
1576 // The result of a dynamic_cast can be null.
1577 if (isa<CXXDynamicCastExpr>(Base))
1578 return false;
1579
1580 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1581 Base = CE->getSubExpr();
1582 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1583 Base = PE->getSubExpr();
1584 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1585 if (UO->getOpcode() == UO_Extension)
1586 Base = UO->getSubExpr();
1587 else
1588 return false;
1589 } else {
1590 return false;
1591 }
1592 }
1593 return true;
1594}
1595
1596LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1597 LValue LV;
1598 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1599 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1600 else
1601 LV = EmitLValue(E);
1602 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1603 SanitizerSet SkippedChecks;
1604 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1605 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1606 if (IsBaseCXXThis)
1607 SkippedChecks.set(SanitizerKind::Alignment, true);
1608 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1609 SkippedChecks.set(SanitizerKind::Null, true);
1610 }
1611 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1612 }
1613 return LV;
1614}
1615
1616/// EmitLValue - Emit code to compute a designator that specifies the location
1617/// of the expression.
1618///
1619/// This can return one of two things: a simple address or a bitfield reference.
1620/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1621/// an LLVM pointer type.
1622///
1623/// If this returns a bitfield reference, nothing about the pointee type of the
1624/// LLVM value is known: For example, it may not be a pointer to an integer.
1625///
1626/// If this returns a normal address, and if the lvalue's C type is fixed size,
1627/// this method guarantees that the returned pointer type will point to an LLVM
1628/// type of the same size of the lvalue's type. If the lvalue has a variable
1629/// length type, this is not possible.
1630///
1631LValue CodeGenFunction::EmitLValue(const Expr *E,
1632 KnownNonNull_t IsKnownNonNull) {
1633 // Run with sufficient stack space so that deeply nested expressions do not
1634 // cause a stack overflow.
1635 LValue LV;
1636 CGM.runWithSufficientStackSpace(
1637 E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });
1638
1639 if (IsKnownNonNull && !LV.isKnownNonNull())
1640 LV.setKnownNonNull();
1641 return LV;
1642}
1643
1644static QualType getConstantExprReferredType(const FullExpr *E,
1645 const ASTContext &Ctx) {
1646 const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1647 if (isa<OpaqueValueExpr>(SE))
1648 return SE->getType();
1649 return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
1650}
1651
1652LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1653 KnownNonNull_t IsKnownNonNull) {
1654 ApplyDebugLocation DL(*this, E);
1655 switch (E->getStmtClass()) {
1656 default: return EmitUnsupportedLValue(E, "l-value expression");
1657
1658 case Expr::ObjCPropertyRefExprClass:
1659 llvm_unreachable("cannot emit a property reference directly");
1660
1661 case Expr::ObjCSelectorExprClass:
1663 case Expr::ObjCIsaExprClass:
1665 case Expr::BinaryOperatorClass:
1667 case Expr::CompoundAssignOperatorClass: {
1668 QualType Ty = E->getType();
1669 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1670 Ty = AT->getValueType();
1671 if (!Ty->isAnyComplexType())
1674 }
1675 case Expr::CallExprClass:
1676 case Expr::CXXMemberCallExprClass:
1677 case Expr::CXXOperatorCallExprClass:
1678 case Expr::UserDefinedLiteralClass:
1680 case Expr::CXXRewrittenBinaryOperatorClass:
1681 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1682 IsKnownNonNull);
1683 case Expr::VAArgExprClass:
1685 case Expr::DeclRefExprClass:
1687 case Expr::ConstantExprClass: {
1688 const ConstantExpr *CE = cast<ConstantExpr>(E);
1689 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1690 QualType RetType = getConstantExprReferredType(CE, getContext());
1691 return MakeNaturalAlignAddrLValue(Result, RetType);
1692 }
1693 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1694 }
1695 case Expr::ParenExprClass:
1696 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1697 case Expr::GenericSelectionExprClass:
1698 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1699 IsKnownNonNull);
1700 case Expr::PredefinedExprClass:
1702 case Expr::StringLiteralClass:
1704 case Expr::ObjCEncodeExprClass:
1706 case Expr::PseudoObjectExprClass:
1708 case Expr::InitListExprClass:
1710 case Expr::CXXTemporaryObjectExprClass:
1711 case Expr::CXXConstructExprClass:
1713 case Expr::CXXBindTemporaryExprClass:
1715 case Expr::CXXUuidofExprClass:
1717 case Expr::LambdaExprClass:
1718 return EmitAggExprToLValue(E);
1719
1720 case Expr::ExprWithCleanupsClass: {
1721 const auto *cleanups = cast<ExprWithCleanups>(E);
1722 RunCleanupsScope Scope(*this);
1723 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1724 if (LV.isSimple()) {
1725 // Defend against branches out of gnu statement expressions surrounded by
1726 // cleanups.
1727 Address Addr = LV.getAddress();
1728 llvm::Value *V = Addr.getBasePointer();
1729 Scope.ForceCleanup({&V});
1730 Addr.replaceBasePointer(V);
1731 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1732 LV.getBaseInfo(), LV.getTBAAInfo());
1733 }
1734 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1735 // bitfield lvalue or some other non-simple lvalue?
1736 return LV;
1737 }
1738
1739 case Expr::CXXDefaultArgExprClass: {
1740 auto *DAE = cast<CXXDefaultArgExpr>(E);
1741 CXXDefaultArgExprScope Scope(*this, DAE);
1742 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1743 }
1744 case Expr::CXXDefaultInitExprClass: {
1745 auto *DIE = cast<CXXDefaultInitExpr>(E);
1746 CXXDefaultInitExprScope Scope(*this, DIE);
1747 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1748 }
1749 case Expr::CXXTypeidExprClass:
1751
1752 case Expr::ObjCMessageExprClass:
1754 case Expr::ObjCIvarRefExprClass:
1756 case Expr::StmtExprClass:
1758 case Expr::UnaryOperatorClass:
1760 case Expr::ArraySubscriptExprClass:
1762 case Expr::MatrixSubscriptExprClass:
1764 case Expr::ArraySectionExprClass:
1766 case Expr::ExtVectorElementExprClass:
1768 case Expr::CXXThisExprClass:
1770 case Expr::MemberExprClass:
1772 case Expr::CompoundLiteralExprClass:
1774 case Expr::ConditionalOperatorClass:
1776 case Expr::BinaryConditionalOperatorClass:
1778 case Expr::ChooseExprClass:
1779 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1780 case Expr::OpaqueValueExprClass:
1782 case Expr::SubstNonTypeTemplateParmExprClass:
1783 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1784 IsKnownNonNull);
1785 case Expr::ImplicitCastExprClass:
1786 case Expr::CStyleCastExprClass:
1787 case Expr::CXXFunctionalCastExprClass:
1788 case Expr::CXXStaticCastExprClass:
1789 case Expr::CXXDynamicCastExprClass:
1790 case Expr::CXXReinterpretCastExprClass:
1791 case Expr::CXXConstCastExprClass:
1792 case Expr::CXXAddrspaceCastExprClass:
1793 case Expr::ObjCBridgedCastExprClass:
1794 return EmitCastLValue(cast<CastExpr>(E));
1795
1796 case Expr::MaterializeTemporaryExprClass:
1798
1799 case Expr::CoawaitExprClass:
1801 case Expr::CoyieldExprClass:
1803 case Expr::PackIndexingExprClass:
1804 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1805 case Expr::HLSLOutArgExprClass:
1806 llvm_unreachable("cannot emit a HLSL out argument directly");
1807 }
1808}
1809
1810/// Given an object of the given canonical type, can we safely copy a
1811/// value out of it based on its initializer?
1813 assert(type.isCanonical());
1814 assert(!type->isReferenceType());
1815
1816 // Must be const-qualified but non-volatile.
1817 Qualifiers qs = type.getLocalQualifiers();
1818 if (!qs.hasConst() || qs.hasVolatile()) return false;
1819
1820 // Otherwise, all object types satisfy this except C++ classes with
1821 // mutable subobjects or non-trivial copy/destroy behavior.
1822 if (const auto *RT = dyn_cast<RecordType>(type))
1823 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
1824 RD = RD->getDefinitionOrSelf();
1825 if (RD->hasMutableFields() || !RD->isTrivial())
1826 return false;
1827 }
1828
1829 return true;
1830}
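// For illustration (editorial sketch, assuming ordinary C++ semantics) of the
// distinction this predicate draws:
//   const int N = 3;               // emittable: const, non-volatile scalar
//   volatile const int V = 0;      // not emittable: volatile
//   struct S { mutable int m; };
//   const S s{};                   // not emittable: mutable subobject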
1831
1832/// Can we constant-emit a load of a reference to a variable of the
1833/// given type? This is different from predicates like
1834/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1835/// in situations that don't necessarily satisfy the language's rules
1836/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
1837/// to do this with const float variables even if those variables
1838/// aren't marked 'constexpr'.
1846 type = type.getCanonicalType();
1847 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1848 if (isConstantEmittableObjectType(ref->getPointeeType()))
1850 return CEK_AsReferenceOnly;
1851 }
1853 return CEK_AsValueOnly;
1854 return CEK_None;
1855}
1856
1857/// Try to emit a reference to the given value without producing it as
1858/// an l-value. This is just an optimization, but it avoids us needing
1859/// to emit global copies of variables if they're named without triggering
1860/// a formal use in a context where we can't emit a direct reference to them,
1861/// for instance if a block or lambda or a member of a local class uses a
1862/// const int variable or constexpr variable from an enclosing function.
1865 const ValueDecl *Value = RefExpr->getDecl();
1866
1867 // The value needs to be an enum constant or a constant variable.
1869 if (isa<ParmVarDecl>(Value)) {
1870 CEK = CEK_None;
1871 } else if (const auto *var = dyn_cast<VarDecl>(Value)) {
1872 CEK = checkVarTypeForConstantEmission(var->getType());
1873 } else if (isa<EnumConstantDecl>(Value)) {
1874 CEK = CEK_AsValueOnly;
1875 } else {
1876 CEK = CEK_None;
1877 }
1878 if (CEK == CEK_None) return ConstantEmission();
1879
1880 Expr::EvalResult result;
1881 bool resultIsReference;
1882 QualType resultType;
1883
1884 // It's best to evaluate all the way as an r-value if that's permitted.
1885 if (CEK != CEK_AsReferenceOnly &&
1886 RefExpr->EvaluateAsRValue(result, getContext())) {
1887 resultIsReference = false;
1888 resultType = RefExpr->getType().getUnqualifiedType();
1889
1890 // Otherwise, try to evaluate as an l-value.
1891 } else if (CEK != CEK_AsValueOnly &&
1892 RefExpr->EvaluateAsLValue(result, getContext())) {
1893 resultIsReference = true;
1894 resultType = Value->getType();
1895
1896 // Failure.
1897 } else {
1898 return ConstantEmission();
1899 }
1900
1901 // In any case, if the initializer has side-effects, abandon ship.
1902 if (result.HasSideEffects)
1903 return ConstantEmission();
1904
1905 // In CUDA/HIP device compilation, a lambda may capture a reference variable
1906 // referencing a global host variable by copy. In this case the lambda should
1907 // make a copy of the value of the global host variable. The DRE of the
1908 // captured reference variable cannot be emitted as a compile-time constant
1909 // load from the host global variable, since the host variable is not
1910 // accessible on device. The DRE of the captured reference variable has to be
1911 // loaded from captures.
1912 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1914 auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1915 if (isLambdaMethod(MD) && MD->getOverloadedOperator() == OO_Call) {
1916 const APValue::LValueBase &base = result.Val.getLValueBase();
1917 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1918 if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1919 if (!VD->hasAttr<CUDADeviceAttr>()) {
1920 return ConstantEmission();
1921 }
1922 }
1923 }
1924 }
1925 }
1926
1927 // Emit as a constant.
1928 llvm::Constant *C = ConstantEmitter(*this).emitAbstract(
1929 RefExpr->getLocation(), result.Val, resultType);
1930
1931 // Make sure we emit a debug reference to the global variable.
1932 // This should probably fire even for
1933 if (isa<VarDecl>(Value)) {
1934 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(Value)))
1935 EmitDeclRefExprDbgValue(RefExpr, result.Val);
1936 } else {
1938 EmitDeclRefExprDbgValue(RefExpr, result.Val);
1939 }
1940
1941 // If we emitted a reference constant, we need to dereference that.
1942 if (resultIsReference)
1944
1946}
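// For example (illustrative): given
//   void f() {
//     const int limit = 42;
//     auto g = [] { return limit; };   // 'limit' is typically not odr-used here
//   }
// the DeclRefExpr for 'limit' inside the lambda can usually be emitted as the
// constant 42 directly, rather than materializing a capture or a global copy
// of the variable.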
1947
1949 const MemberExpr *ME) {
1950 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1951 // Try to emit static variable member expressions as DREs.
1952 return DeclRefExpr::Create(
1954 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1955 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1956 }
1957 return nullptr;
1958}
1959
1963 return tryEmitAsConstant(DRE);
1964 return ConstantEmission();
1965}
1966
1968 const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1969 assert(Constant && "not a constant");
1970 if (Constant.isReference())
1971 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1972 E->getExprLoc())
1973 .getScalarVal();
1974 return Constant.getValue();
1975}
1976
1978 SourceLocation Loc) {
1979 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
1980 lvalue.getType(), Loc, lvalue.getBaseInfo(),
1981 lvalue.getTBAAInfo(), lvalue.isNontemporal());
1982}
1983
1985 llvm::APInt &Min, llvm::APInt &End,
1986 bool StrictEnums, bool IsBool) {
1987 const auto *ED = Ty->getAsEnumDecl();
1988 bool IsRegularCPlusPlusEnum =
1989 CGF.getLangOpts().CPlusPlus && StrictEnums && ED && !ED->isFixed();
1990 if (!IsBool && !IsRegularCPlusPlusEnum)
1991 return false;
1992
1993 if (IsBool) {
1994 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1995 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1996 } else {
1997 ED->getValueRange(End, Min);
1998 }
1999 return true;
2000}
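// For example (illustrative): for 'bool' the range is [0, 2). For a C++
// unscoped enum without a fixed underlying type, e.g. 'enum E { A, B, C };',
// the range under -fstrict-enums is derived from the enumerators' bit width
// (here roughly [0, 4), since the values need two bits).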
2001
2002llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
2003 llvm::APInt Min, End;
2004 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
2005 Ty->hasBooleanRepresentation() && !Ty->isVectorType()))
2006 return nullptr;
2007
2008 llvm::MDBuilder MDHelper(getLLVMContext());
2009 return MDHelper.createRange(Min, End);
2010}
2011
2013 SourceLocation Loc) {
2014 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2015 // In order to prevent the optimizer from throwing away the check, don't
2016 // attach range metadata to the load.
2017 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2018 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2019 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2020 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2021 llvm::MDNode::get(CGM.getLLVMContext(), {}));
2022 }
2023 }
2024}
2025
2027 SourceLocation Loc) {
2028 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
2029 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
2030 if (!HasBoolCheck && !HasEnumCheck)
2031 return false;
2032
2033 bool IsBool = (Ty->hasBooleanRepresentation() && !Ty->isVectorType()) ||
2034 NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
2035 bool NeedsBoolCheck = HasBoolCheck && IsBool;
2036 bool NeedsEnumCheck = HasEnumCheck && Ty->isEnumeralType();
2037 if (!NeedsBoolCheck && !NeedsEnumCheck)
2038 return false;
2039
2040 // Single-bit booleans don't need to be checked. Special-case this to avoid
2041 // a bit width mismatch when handling bitfield values. This is handled by
2042 // EmitFromMemory for the non-bitfield case.
2043 if (IsBool &&
2044 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
2045 return false;
2046
2047 if (NeedsEnumCheck &&
2048 getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
2049 return false;
2050
2051 llvm::APInt Min, End;
2052 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
2053 return true;
2054
2056 NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;
2057
2058 auto &Ctx = getLLVMContext();
2059 auto CheckHandler = SanitizerHandler::LoadInvalidValue;
2060 SanitizerDebugLocation SanScope(this, {Kind}, CheckHandler);
2061 llvm::Value *Check;
2062 --End;
2063 if (!Min) {
2064 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
2065 } else {
2066 llvm::Value *Upper =
2067 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
2068 llvm::Value *Lower =
2069 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
2070 Check = Builder.CreateAnd(Upper, Lower);
2071 }
2072 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
2074 EmitCheck(std::make_pair(Check, Kind), CheckHandler, StaticArgs, Value);
2075 return true;
2076}
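// For example (rough sketch of the emitted IR): for a bool loaded as i8 the
// range is [0, 2), so after the decrement above the check is a single
// unsigned compare, roughly:
//   %ok = icmp ule i8 %val, 1
// followed by the usual EmitCheck diagnostic/trap machinery when %ok is false.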
2077
2079 QualType Ty,
2080 SourceLocation Loc,
2081 LValueBaseInfo BaseInfo,
2082 TBAAAccessInfo TBAAInfo,
2083 bool isNontemporal) {
2084 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2085 if (GV->isThreadLocal())
2086 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2088
2089 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2090 // Boolean vectors use `iN` as storage type.
2091 if (ClangVecTy->isPackedVectorBoolType(getContext())) {
2092 llvm::Type *ValTy = ConvertType(Ty);
2093 unsigned ValNumElems =
2094 cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2095 // Load the `iP` storage object (P is the padded vector size).
2096 auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
2097 const auto *RawIntTy = RawIntV->getType();
2098 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
2099 // Bitcast iP --> <P x i1>.
2100 auto *PaddedVecTy = llvm::FixedVectorType::get(
2101 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2102 llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
2103 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2104 V = emitBoolVecConversion(V, ValNumElems, "extractvec");
2105
2106 return EmitFromMemory(V, Ty);
2107 }
2108
2109 // Handles vectors of sizes that are likely to be expanded to a larger size
2110 // to optimize performance.
2111 auto *VTy = cast<llvm::FixedVectorType>(Addr.getElementType());
2112 auto *NewVecTy =
2113 CGM.getABIInfo().getOptimalVectorMemoryType(VTy, getLangOpts());
2114
2115 if (VTy != NewVecTy) {
2116 Address Cast = Addr.withElementType(NewVecTy);
2117 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVecN");
2118 unsigned OldNumElements = VTy->getNumElements();
2119 SmallVector<int, 16> Mask(OldNumElements);
2120 std::iota(Mask.begin(), Mask.end(), 0);
2121 V = Builder.CreateShuffleVector(V, Mask, "extractVec");
2122 return EmitFromMemory(V, Ty);
2123 }
2124 }
2125
2126 // Atomic operations have to be done on integral types.
2127 LValue AtomicLValue =
2128 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2129 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
2130 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
2131 }
2132
2133 Addr =
2134 Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));
2135
2136 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
2137 if (isNontemporal) {
2138 llvm::MDNode *Node = llvm::MDNode::get(
2139 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2140 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2141 }
2142
2143 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2144
2145 maybeAttachRangeForLoad(Load, Ty, Loc);
2146
2147 return EmitFromMemory(Load, Ty);
2148}
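// For example (illustrative, assuming a 3-element packed bool vector whose
// memory type is i8): the packed-bool path above produces roughly
//   %bits = load i8, ptr %addr               ; "load_bits"
//   %pad  = bitcast i8 %bits to <8 x i1>
//   %v    = shufflevector <8 x i1> %pad, ... ; keep the first 3 lanes
// yielding the <3 x i1> value form of the vector.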
2149
2150/// Converts a scalar value from its primary IR type (as returned
2151/// by ConvertType) to its load/store type (as returned by
2152/// convertTypeForLoadStore).
2153llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2154 if (auto *AtomicTy = Ty->getAs<AtomicType>())
2155 Ty = AtomicTy->getValueType();
2156
2157 if (Ty->isExtVectorBoolType()) {
2158 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2159 if (StoreTy->isVectorTy() && StoreTy->getScalarSizeInBits() >
2160 Value->getType()->getScalarSizeInBits())
2161 return Builder.CreateZExt(Value, StoreTy);
2162
2163 // Expand to the memory bit width.
2164 unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2165 // <N x i1> --> <P x i1>.
2166 Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2167 // <P x i1> --> iP.
2168 Value = Builder.CreateBitCast(Value, StoreTy);
2169 }
2170
2171 if (Ty->hasBooleanRepresentation() || Ty->isBitIntType()) {
2172 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2174 return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2175 }
2176
2177 return Value;
2178}
2179
2180/// Converts a scalar value from its load/store type (as returned
2181/// by convertTypeForLoadStore) to its primary IR type (as returned
2182/// by ConvertType).
2183llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2184 if (auto *AtomicTy = Ty->getAs<AtomicType>())
2185 Ty = AtomicTy->getValueType();
2186
2188 const auto *RawIntTy = Value->getType();
2189
2190 // Bitcast iP --> <P x i1>.
2191 auto *PaddedVecTy = llvm::FixedVectorType::get(
2192 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2193 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2194 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2195 llvm::Type *ValTy = ConvertType(Ty);
2196 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2197 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2198 }
2199
2200 llvm::Type *ResTy = ConvertType(Ty);
2201 if (Ty->hasBooleanRepresentation() || Ty->isBitIntType() ||
2202 Ty->isExtVectorBoolType())
2203 return Builder.CreateTrunc(Value, ResTy, "loadedv");
2204
2205 return Value;
2206}
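// For example (illustrative): a C++ 'bool' has primary IR type i1 but is
// stored as i8, so EmitToMemory emits roughly 'zext i1 %v to i8' ("storedv")
// and EmitFromMemory emits the inverse 'trunc i8 %v to i1' ("loadedv").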
2207
2208// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2209// MatrixType), if it points to an array (the memory type of MatrixType).
2211 CodeGenFunction &CGF,
2212 bool IsVector = true) {
2213 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2214 if (ArrayTy && IsVector) {
2215 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2216 ArrayTy->getNumElements());
2217
2218 return Addr.withElementType(VectorTy);
2219 }
2220 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2221 if (VectorTy && !IsVector) {
2222 auto *ArrayTy = llvm::ArrayType::get(
2223 VectorTy->getElementType(),
2224 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2225
2226 return Addr.withElementType(ArrayTy);
2227 }
2228
2229 return Addr;
2230}
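// For example (illustrative): a 2x2 float matrix is stored in memory as
// [4 x float] but manipulated as <4 x float>; this helper rewrites the
// address's element type between those two views as needed.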
2231
2232// Emit a store of a matrix LValue. This may require casting the original
2233// pointer from the memory type (ArrayType) to a pointer to the value type
2234// (VectorType).
2235static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2236 bool isInit, CodeGenFunction &CGF) {
2237 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2238 value->getType()->isVectorTy());
2239 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2240 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2241 lvalue.isNontemporal());
2242}
2243
2245 bool Volatile, QualType Ty,
2246 LValueBaseInfo BaseInfo,
2247 TBAAAccessInfo TBAAInfo,
2248 bool isInit, bool isNontemporal) {
2249 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2250 if (GV->isThreadLocal())
2251 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2253
2254 // Handles vectors of sizes that are likely to be expanded to a larger size
2255 // to optimize performance.
2256 llvm::Type *SrcTy = Value->getType();
2257 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2258 if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2259 auto *NewVecTy =
2260 CGM.getABIInfo().getOptimalVectorMemoryType(VecTy, getLangOpts());
2261 if (!ClangVecTy->isPackedVectorBoolType(getContext()) &&
2262 VecTy != NewVecTy) {
2263 SmallVector<int, 16> Mask(NewVecTy->getNumElements(), -1);
2264 std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0);
2265 Value = Builder.CreateShuffleVector(Value, Mask, "extractVec");
2266 SrcTy = NewVecTy;
2267 }
2268 if (Addr.getElementType() != SrcTy)
2269 Addr = Addr.withElementType(SrcTy);
2270 }
2271 }
2272
2273 Value = EmitToMemory(Value, Ty);
2274
2275 LValue AtomicLValue =
2276 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2277 if (Ty->isAtomicType() ||
2278 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2279 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2280 return;
2281 }
2282
2283 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2285
2286 if (isNontemporal) {
2287 llvm::MDNode *Node =
2288 llvm::MDNode::get(Store->getContext(),
2289 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2290 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2291 }
2292
2293 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2294}
2295
2296void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2297 bool isInit) {
2298 if (lvalue.getType()->isConstantMatrixType()) {
2299 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2300 return;
2301 }
2302
2303 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2304 lvalue.getType(), lvalue.getBaseInfo(),
2305 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2306}
2307
2308// Emit a load of an LValue of matrix type. This may require casting the pointer
2309// from the memory type (ArrayType) to a pointer to the value type (VectorType).
2311 CodeGenFunction &CGF) {
2312 assert(LV.getType()->isConstantMatrixType());
2313 Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
2314 LV.setAddress(Addr);
2315 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2316}
2317
2319 SourceLocation Loc) {
2320 QualType Ty = LV.getType();
2321 switch (getEvaluationKind(Ty)) {
2322 case TEK_Scalar:
2323 return EmitLoadOfLValue(LV, Loc);
2324 case TEK_Complex:
2325 return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
2326 case TEK_Aggregate:
2327 EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2328 return Slot.asRValue();
2329 }
2330 llvm_unreachable("bad evaluation kind");
2331}
2332
2333/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2334/// method emits the address of the lvalue, then loads the result as an rvalue,
2335/// returning the rvalue.
2337 // Load from __ptrauth.
2338 if (PointerAuthQualifier PtrAuth = LV.getQuals().getPointerAuth()) {
2340 llvm::Value *Value = EmitLoadOfLValue(LV, Loc).getScalarVal();
2341 return RValue::get(EmitPointerAuthUnqualify(PtrAuth, Value, LV.getType(),
2342 LV.getAddress(),
2343 /*known nonnull*/ false));
2344 }
2345
2346 if (LV.isObjCWeak()) {
2347 // load of a __weak object.
2348 Address AddrWeakObj = LV.getAddress();
2349 return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
2350 AddrWeakObj));
2351 }
2353 // In MRC mode, we do a load+autorelease.
2354 if (!getLangOpts().ObjCAutoRefCount) {
2356 }
2357
2358 // In ARC mode, we load retained and then consume the value.
2359 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2360 Object = EmitObjCConsumeObject(LV.getType(), Object);
2361 return RValue::get(Object);
2362 }
2363
2364 if (LV.isSimple()) {
2365 assert(!LV.getType()->isFunctionType());
2366
2367 if (LV.getType()->isConstantMatrixType())
2368 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2369
2370 // Everything needs a load.
2371 return RValue::get(EmitLoadOfScalar(LV, Loc));
2372 }
2373
2374 if (LV.isVectorElt()) {
2375 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2376 LV.isVolatileQualified());
2377 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2378 "vecext"));
2379 }
2380
2381 // If this is a reference to a subset of the elements of a vector, either
2382 // shuffle the input or extract/insert them as appropriate.
2383 if (LV.isExtVectorElt()) {
2385 }
2386
2387 // Global Register variables always invoke intrinsics
2388 if (LV.isGlobalReg())
2389 return EmitLoadOfGlobalRegLValue(LV);
2390
2391 if (LV.isMatrixElt()) {
2392 llvm::Value *Idx = LV.getMatrixIdx();
2393 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2394 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2395 llvm::MatrixBuilder MB(Builder);
2396 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2397 }
2398 llvm::LoadInst *Load =
2399 Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
2400 return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2401 }
2402
2403 assert(LV.isBitField() && "Unknown LValue type!");
2404 return EmitLoadOfBitfieldLValue(LV, Loc);
2405}
2406
2408 SourceLocation Loc) {
2409 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2410
2411 // Get the output type.
2412 llvm::Type *ResLTy = ConvertType(LV.getType());
2413
2414 Address Ptr = LV.getBitFieldAddress();
2415 llvm::Value *Val =
2416 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2417
2418 bool UseVolatile = LV.isVolatileQualified() &&
2419 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2420 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2421 const unsigned StorageSize =
2422 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2423 if (Info.IsSigned) {
2424 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2425 unsigned HighBits = StorageSize - Offset - Info.Size;
2426 if (HighBits)
2427 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2428 if (Offset + HighBits)
2429 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2430 } else {
2431 if (Offset)
2432 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2433 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2434 Val = Builder.CreateAnd(
2435 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2436 }
2437 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2438 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2439 return RValue::get(Val);
2440}
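// For example (illustrative; assuming both fields share one i8 storage unit):
//   struct S { int x : 3; unsigned y : 5; };
// loading 's.x' (signed, Offset 0, Size 3, StorageSize 8) emits roughly
// 'shl i8 %val, 5' followed by 'ashr i8 %val, 5' to sign-extend, while
// loading 's.y' (unsigned, Offset 3, Size 5) is roughly a single
// 'lshr i8 %val, 3' before the final cast to the result type.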
2441
2442// If this is a reference to a subset of the elements of a vector, create an
2443// appropriate shufflevector.
2445 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2446 LV.isVolatileQualified());
2447
2448 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2449 // IR value to a vector here allows the rest of codegen to behave as normal.
2450 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2451 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2452 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2453 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2454 }
2455
2456 const llvm::Constant *Elts = LV.getExtVectorElts();
2457
2458 // If the result of the expression is a non-vector type, we must be extracting
2459 // a single element. Just codegen as an extractelement.
2460 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2461 if (!ExprVT) {
2462 unsigned InIdx = getAccessedFieldNo(0, Elts);
2463 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2464
2465 llvm::Value *Element = Builder.CreateExtractElement(Vec, Elt);
2466
2467 llvm::Type *LVTy = ConvertType(LV.getType());
2468 if (Element->getType()->getPrimitiveSizeInBits() >
2469 LVTy->getPrimitiveSizeInBits())
2470 Element = Builder.CreateTrunc(Element, LVTy);
2471
2472 return RValue::get(Element);
2473 }
2474
2475 // Always use shuffle vector to try to retain the original program structure
2476 unsigned NumResultElts = ExprVT->getNumElements();
2477
2479 for (unsigned i = 0; i != NumResultElts; ++i)
2480 Mask.push_back(getAccessedFieldNo(i, Elts));
2481
2482 Vec = Builder.CreateShuffleVector(Vec, Mask);
2483
2484 if (LV.getType()->isExtVectorBoolType())
2485 Vec = Builder.CreateTrunc(Vec, ConvertType(LV.getType()), "truncv");
2486
2487 return RValue::get(Vec);
2488}
2489
2490/// Generates lvalue for partial ext_vector access.
2492 Address VectorAddress = LV.getExtVectorAddress();
2493 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2494 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2495
2496 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2497
2498 const llvm::Constant *Elts = LV.getExtVectorElts();
2499 unsigned ix = getAccessedFieldNo(0, Elts);
2500
2501 Address VectorBasePtrPlusIx =
2502 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2503 "vector.elt");
2504
2505 return VectorBasePtrPlusIx;
2506}
2507
2508/// Loads of global named registers are always calls to intrinsics.
2510 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2511 "Bad type for register variable");
2512 llvm::MDNode *RegName = cast<llvm::MDNode>(
2513 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2514
2515 // We accept integer and pointer types only
2516 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2517 llvm::Type *Ty = OrigTy;
2518 if (OrigTy->isPointerTy())
2519 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2520 llvm::Type *Types[] = { Ty };
2521
2522 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2523 llvm::Value *Call = Builder.CreateCall(
2524 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2525 if (OrigTy->isPointerTy())
2526 Call = Builder.CreateIntToPtr(Call, OrigTy);
2527 return RValue::get(Call);
2528}
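// For example (illustrative): reading a global register variable such as
//   register unsigned long current_sp asm("sp");
// emits roughly
//   %v = call i64 @llvm.read_register.i64(metadata !0)
// where !0 is the named-register metadata node holding the string "sp".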
2529
2530/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2531/// lvalue, where both are guaranteed to have the same type, and that type
2532/// is 'Ty'.
2534 bool isInit) {
2535 if (!Dst.isSimple()) {
2536 if (Dst.isVectorElt()) {
2537 // Read/modify/write the vector, inserting the new element.
2538 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2539 Dst.isVolatileQualified());
2540 llvm::Type *VecTy = Vec->getType();
2541 llvm::Value *SrcVal = Src.getScalarVal();
2542
2543 if (SrcVal->getType()->getPrimitiveSizeInBits() <
2544 VecTy->getScalarSizeInBits())
2545 SrcVal = Builder.CreateZExt(SrcVal, VecTy->getScalarType());
2546
2547 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2548 if (IRStoreTy) {
2549 auto *IRVecTy = llvm::FixedVectorType::get(
2550 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2551 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2552 // iN --> <N x i1>.
2553 }
2554
2555 // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar
2556 // types which are mapped to vector LLVM IR types (e.g. for implementing
2557 // an ABI).
2558 if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(SrcVal->getType());
2559 EltTy && EltTy->getNumElements() == 1)
2560 SrcVal = Builder.CreateBitCast(SrcVal, EltTy->getElementType());
2561
2562 Vec = Builder.CreateInsertElement(Vec, SrcVal, Dst.getVectorIdx(),
2563 "vecins");
2564 if (IRStoreTy) {
2565 // <N x i1> --> <iN>.
2566 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2567 }
2568
2569 auto *I = Builder.CreateStore(Vec, Dst.getVectorAddress(),
2570 Dst.isVolatileQualified());
2572 return;
2573 }
2574
2575 // If this is an update of extended vector elements, insert them as
2576 // appropriate.
2577 if (Dst.isExtVectorElt())
2579
2580 if (Dst.isGlobalReg())
2581 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2582
2583 if (Dst.isMatrixElt()) {
2584 llvm::Value *Idx = Dst.getMatrixIdx();
2585 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2586 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2587 llvm::MatrixBuilder MB(Builder);
2588 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2589 }
2590 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2591 llvm::Value *Vec =
2592 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2593 auto *I = Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2594 Dst.isVolatileQualified());
2596 return;
2597 }
2598
2599 assert(Dst.isBitField() && "Unknown LValue type");
2600 return EmitStoreThroughBitfieldLValue(Src, Dst);
2601 }
2602
2603 // Handle __ptrauth qualification by re-signing the value.
2604 if (PointerAuthQualifier PointerAuth = Dst.getQuals().getPointerAuth()) {
2605 Src = RValue::get(EmitPointerAuthQualify(PointerAuth, Src.getScalarVal(),
2606 Dst.getType(), Dst.getAddress(),
2607 /*known nonnull*/ false));
2608 }
2609
2610 // There's special magic for assigning into an ARC-qualified l-value.
2611 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2612 switch (Lifetime) {
2614 llvm_unreachable("present but none");
2615
2617 // nothing special
2618 break;
2619
2621 if (isInit) {
2622 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2623 break;
2624 }
2625 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2626 return;
2627
2629 if (isInit)
2630 // Initialize and then skip the primitive store.
2632 else
2634 /*ignore*/ true);
2635 return;
2636
2639 Src.getScalarVal()));
2640 // fall into the normal path
2641 break;
2642 }
2643 }
2644
2645 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2646 // load of a __weak object.
2647 Address LvalueDst = Dst.getAddress();
2648 llvm::Value *src = Src.getScalarVal();
2649 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2650 return;
2651 }
2652
2653 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2654 // load of a __strong object.
2655 Address LvalueDst = Dst.getAddress();
2656 llvm::Value *src = Src.getScalarVal();
2657 if (Dst.isObjCIvar()) {
2658 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2659 llvm::Type *ResultType = IntPtrTy;
2661 llvm::Value *RHS = dst.emitRawPointer(*this);
2662 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2663 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2664 ResultType, "sub.ptr.lhs.cast");
2665 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2666 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2667 } else if (Dst.isGlobalObjCRef()) {
2668 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2669 Dst.isThreadLocalRef());
2670 }
2671 else
2672 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2673 return;
2674 }
2675
2676 assert(Src.isScalar() && "Can't emit an agg store with this method");
2677 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2678}
2679
2681 llvm::Value **Result) {
2682 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2683 llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
2684 Address Ptr = Dst.getBitFieldAddress();
2685
2686 // Get the source value, truncated to the width of the bit-field.
2687 llvm::Value *SrcVal = Src.getScalarVal();
2688
2689 // Cast the source to the storage type and shift it into place.
2690 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2691 /*isSigned=*/false);
2692 llvm::Value *MaskedVal = SrcVal;
2693
2694 const bool UseVolatile =
2695 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2696 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2697 const unsigned StorageSize =
2698 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2699 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2700 // See if there are other bits in the bitfield's storage we'll need to load
2701 // and mask together with source before storing.
2702 if (StorageSize != Info.Size) {
2703 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2704 llvm::Value *Val =
2705 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2706
2707 // Mask the source value as needed.
2708 if (!Dst.getType()->hasBooleanRepresentation())
2709 SrcVal = Builder.CreateAnd(
2710 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2711 "bf.value");
2712 MaskedVal = SrcVal;
2713 if (Offset)
2714 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2715
2716 // Mask out the original value.
2717 Val = Builder.CreateAnd(
2718 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2719 "bf.clear");
2720
2721 // Or together the unchanged values and the source value.
2722 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2723 } else {
2724 assert(Offset == 0);
2725 // According to the AAPCS:
2726 // When a volatile bit-field is written, and its container does not overlap
2727 // with any non-bit-field member, its container must be read exactly once
2728 // and written exactly once using the access width appropriate to the type
2729 // of the container. The two accesses are not atomic.
2730 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2731 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2732 Builder.CreateLoad(Ptr, true, "bf.load");
2733 }
2734
2735 // Write the new value back out.
2736 auto *I = Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2737 addInstToCurrentSourceAtom(I, SrcVal);
2738
2739 // Return the new value of the bit-field, if requested.
2740 if (Result) {
2741 llvm::Value *ResultVal = MaskedVal;
2742
2743 // Sign extend the value if needed.
2744 if (Info.IsSigned) {
2745 assert(Info.Size <= StorageSize);
2746 unsigned HighBits = StorageSize - Info.Size;
2747 if (HighBits) {
2748 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2749 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2750 }
2751 }
2752
2753 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2754 "bf.result.cast");
2755 *Result = EmitFromMemory(ResultVal, Dst.getType());
2756 }
2757}
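// For example (illustrative; Offset 3, Size 5 within an i8 storage unit): the
// store above is a read-modify-write, roughly
//   %old = load i8, ptr %p               ; "bf.load"
//   %src = and i8 %v, 31                 ; keep the low 5 bits ("bf.value")
//   %shl = shl i8 %src, 3                ; "bf.shl"
//   %clr = and i8 %old, 7                ; clear bits 3..7 ("bf.clear")
//   %set = or i8 %clr, %shl              ; "bf.set"
//   store i8 %set, ptr %p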
2758
2760 LValue Dst) {
2761 llvm::Value *SrcVal = Src.getScalarVal();
2762 Address DstAddr = Dst.getExtVectorAddress();
2763 if (DstAddr.getElementType()->getScalarSizeInBits() >
2764 SrcVal->getType()->getScalarSizeInBits())
2765 SrcVal = Builder.CreateZExt(
2766 SrcVal, convertTypeForLoadStore(Dst.getType(), SrcVal->getType()));
2767
2768 // HLSL allows storing to scalar values through ExtVector component LValues.
2769 // To support this we need to handle the case where the destination address is
2770 // a scalar.
2771 if (!DstAddr.getElementType()->isVectorTy()) {
2772 assert(!Dst.getType()->isVectorType() &&
2773 "this should only occur for non-vector l-values");
2774 Builder.CreateStore(SrcVal, DstAddr, Dst.isVolatileQualified());
2775 return;
2776 }
2777
2778 // This access turns into a read/modify/write of the vector. Load the input
2779 // value now.
2780 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2781 llvm::Type *VecTy = Vec->getType();
2782 const llvm::Constant *Elts = Dst.getExtVectorElts();
2783
2784 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2785 unsigned NumSrcElts = VTy->getNumElements();
2786 unsigned NumDstElts = cast<llvm::FixedVectorType>(VecTy)->getNumElements();
2787 if (NumDstElts == NumSrcElts) {
2788 // Use a shuffle vector if the src and destination have the same number of
2789 // elements, and restore the vector mask since it is on the side it will be
2790 // stored.
2791 SmallVector<int, 4> Mask(NumDstElts);
2792 for (unsigned i = 0; i != NumSrcElts; ++i)
2793 Mask[getAccessedFieldNo(i, Elts)] = i;
2794
2795 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2796 } else if (NumDstElts > NumSrcElts) {
2797 // Extend the source vector to the same length and then shuffle it
2798 // into the destination.
2799 // FIXME: since we're shuffling with undef, can we just use the indices
2800 // into that? This could be simpler.
2801 SmallVector<int, 4> ExtMask;
2802 for (unsigned i = 0; i != NumSrcElts; ++i)
2803 ExtMask.push_back(i);
2804 ExtMask.resize(NumDstElts, -1);
2805 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2806 // build identity
2808 for (unsigned i = 0; i != NumDstElts; ++i)
2809 Mask.push_back(i);
2810
2811 // When the vector size is odd and .odd or .hi is used, the last element
2812 // of the Elts constant array will be one past the size of the vector.
2813 // Ignore the last element here, if it is greater than the mask size.
2814 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2815 NumSrcElts--;
2816
2817 // Modify the mask entries for what gets shuffled in.
2818 for (unsigned i = 0; i != NumSrcElts; ++i)
2819 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2820 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2821 } else {
2822 // We should never shorten the vector
2823 llvm_unreachable("unexpected shorten vector length");
2824 }
2825 } else {
2826 // If the Src is a scalar (not a vector) and the target is a vector, it must
2827 // be updating one element.
2828 unsigned InIdx = getAccessedFieldNo(0, Elts);
2829 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2830
2831 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2832 }
2833
2834 Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2835 Dst.isVolatileQualified());
2836}
2837
2838/// Store of global named registers are always calls to intrinsics.
2840 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2841 "Bad type for register variable");
2842 llvm::MDNode *RegName = cast<llvm::MDNode>(
2843 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2844 assert(RegName && "Register LValue is not metadata");
2845
2846 // We accept integer and pointer types only
2847 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2848 llvm::Type *Ty = OrigTy;
2849 if (OrigTy->isPointerTy())
2850 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2851 llvm::Type *Types[] = { Ty };
2852
2853 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2854 llvm::Value *Value = Src.getScalarVal();
2855 if (OrigTy->isPointerTy())
2856 Value = Builder.CreatePtrToInt(Value, Ty);
2857 Builder.CreateCall(
2858 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2859}
2860
2861// setObjCGCLValueClass - sets class of the lvalue for the purpose of
2862// generating the write-barrier API. It is currently a global, ivar,
2863// or neither.
2864static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2865 LValue &LV,
2866 bool IsMemberAccess=false) {
2867 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2868 return;
2869
2870 if (isa<ObjCIvarRefExpr>(E)) {
2871 QualType ExpTy = E->getType();
2872 if (IsMemberAccess && ExpTy->isPointerType()) {
2873 // If the ivar is a structure pointer, assigning to a field of
2874 // this struct follows gcc's behavior and conservatively makes it a
2875 // non-ivar write-barrier.
2876 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2877 if (ExpTy->isRecordType()) {
2878 LV.setObjCIvar(false);
2879 return;
2880 }
2881 }
2882 LV.setObjCIvar(true);
2883 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2884 LV.setBaseIvarExp(Exp->getBase());
2885 LV.setObjCArray(E->getType()->isArrayType());
2886 return;
2887 }
2888
2889 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2890 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2891 if (VD->hasGlobalStorage()) {
2892 LV.setGlobalObjCRef(true);
2893 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2894 }
2895 }
2896 LV.setObjCArray(E->getType()->isArrayType());
2897 return;
2898 }
2899
2900 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2901 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2902 return;
2903 }
2904
2905 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2906 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2907 if (LV.isObjCIvar()) {
2908 // If cast is to a structure pointer, follow gcc's behavior and make it
2909 // a non-ivar write-barrier.
2910 QualType ExpTy = E->getType();
2911 if (ExpTy->isPointerType())
2912 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2913 if (ExpTy->isRecordType())
2914 LV.setObjCIvar(false);
2915 }
2916 return;
2917 }
2918
2919 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2920 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2921 return;
2922 }
2923
2924 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2925 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2926 return;
2927 }
2928
2929 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2930 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2931 return;
2932 }
2933
2934 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2935 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2936 return;
2937 }
2938
2939 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2940 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2941 if (LV.isObjCIvar() && !LV.isObjCArray())
2942 // Using array syntax to assign to what an ivar points to is not the
2943 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2944 LV.setObjCIvar(false);
2945 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2946 // Using array syntax to assign to what a global points to is not the
2947 // same as assigning to the global itself. {id *G;} G[i] = 0;
2948 LV.setGlobalObjCRef(false);
2949 return;
2950 }
2951
2952 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2953 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2954 // We don't know if the member is an 'ivar', but this flag is looked at
2955 // only in the context of LV.isObjCIvar().
2956 LV.setObjCArray(E->getType()->isArrayType());
2957 return;
2958 }
2959}
2960
2962 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2963 llvm::Type *RealVarTy, SourceLocation Loc) {
2964 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2966 CGF, VD, Addr, Loc);
2967 else
2968 Addr =
2969 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2970
2971 Addr = Addr.withElementType(RealVarTy);
2973}
2974
2976 const VarDecl *VD, QualType T) {
2977 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2978 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2979 // Return an invalid address if variable is MT_To (or MT_Enter starting with
2980 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2981 // and MT_To (or MT_Enter) with unified memory, return a valid address.
2982 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2983 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2985 return Address::invalid();
2986 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2987 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2988 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2990 "Expected link clause OR to clause with unified memory enabled.");
2991 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2993 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2994}
2995
2996Address
2998 LValueBaseInfo *PointeeBaseInfo,
2999 TBAAAccessInfo *PointeeTBAAInfo) {
3000 llvm::LoadInst *Load =
3001 Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
3002 CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
3003 QualType PTy = RefLVal.getType()->getPointeeType();
3004 CharUnits Align = CGM.getNaturalTypeAlignment(
3005 PTy, PointeeBaseInfo, PointeeTBAAInfo, /*ForPointeeType=*/true);
3006 if (!PTy->isIncompleteType()) {
3007 llvm::LLVMContext &Ctx = getLLVMContext();
3008 llvm::MDBuilder MDB(Ctx);
3009 // Emit !nonnull metadata
3010 if (CGM.getTypes().getTargetAddressSpace(PTy) == 0 &&
3011 !CGM.getCodeGenOpts().NullPointerIsValid)
3012 Load->setMetadata(llvm::LLVMContext::MD_nonnull,
3013 llvm::MDNode::get(Ctx, {}));
3014 // Emit !align metadata
3015 if (PTy->isObjectType()) {
3016 auto AlignVal = Align.getQuantity();
3017 if (AlignVal > 1) {
3018 Load->setMetadata(
3019 llvm::LLVMContext::MD_align,
3020 llvm::MDNode::get(Ctx, MDB.createConstant(llvm::ConstantInt::get(
3021 Builder.getInt64Ty(), AlignVal))));
3022 }
3023 }
3024 }
3025 return makeNaturalAddressForPointer(Load, PTy, Align,
3026 /*ForPointeeType=*/true, PointeeBaseInfo,
3027 PointeeTBAAInfo);
3028}
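// For example (illustrative): loading through an 'int &' reference emits a
// pointer load; for a complete pointee type in address space 0 (with null
// pointers invalid) the load is additionally tagged with !nonnull metadata
// and, since int here is 4-byte aligned, with !align metadata carrying the
// value 4.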
3029
3031 LValueBaseInfo PointeeBaseInfo;
3032 TBAAAccessInfo PointeeTBAAInfo;
3033 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
3034 &PointeeTBAAInfo);
3035 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
3036 PointeeBaseInfo, PointeeTBAAInfo);
3037}
3038
3040 const PointerType *PtrTy,
3041 LValueBaseInfo *BaseInfo,
3042 TBAAAccessInfo *TBAAInfo) {
3043 llvm::Value *Addr = Builder.CreateLoad(Ptr);
3044 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
3045 CharUnits(), /*ForPointeeType=*/true,
3046 BaseInfo, TBAAInfo);
3047}
3048
3050 const PointerType *PtrTy) {
3051 LValueBaseInfo BaseInfo;
3052 TBAAAccessInfo TBAAInfo;
3053 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
3054 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
3055}
3056
3058 const Expr *E, const VarDecl *VD) {
3059 QualType T = E->getType();
3060
3061 // If it's thread_local, emit a call to its wrapper function instead.
3062 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
3064 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
3065 // Check if the variable is marked as declare target with link clause in
3066 // device codegen.
3067 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
3069 if (Addr.isValid())
3071 }
3072
3073 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
3074
3075 if (VD->getTLSKind() != VarDecl::TLS_None)
3076 V = CGF.Builder.CreateThreadLocalAddress(V);
3077
3078 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
3079 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
3080 Address Addr(V, RealVarTy, Alignment);
3081 // Emit reference to the private copy of the variable if it is an OpenMP
3082 // threadprivate variable.
3083 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
3084 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3085 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
3086 E->getExprLoc());
3087 }
3088 LValue LV = VD->getType()->isReferenceType() ?
3092 setObjCGCLValueClass(CGF.getContext(), E, LV);
3093 return LV;
3094}
3095
3097 llvm::Type *Ty) {
3098 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3099 if (FD->hasAttr<WeakRefAttr>()) {
3101 return aliasee.getPointer();
3102 }
3103
3104 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
3105 return V;
3106}
3107
3108static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
3109 GlobalDecl GD) {
3110 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3111 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
3112 QualType ETy = E->getType();
3114 if (auto *GV = dyn_cast<llvm::GlobalValue>(V))
3115 V = llvm::NoCFIValue::get(GV);
3116 }
3117 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
3118 return CGF.MakeAddrLValue(V, ETy, Alignment, AlignmentSource::Decl);
3119}
3120
3122 llvm::Value *ThisValue) {
3123
3124 return CGF.EmitLValueForLambdaField(FD, ThisValue);
3125}
3126
3127/// Named Registers are named metadata pointing to the register name
3128/// which will be read from/written to as an argument to the intrinsic
3129/// @llvm.read/write_register.
3130/// So far, only the name is being passed down, but other options such as
3131/// register type, allocation type or even optimization options could be
3132/// passed down via the metadata node.
3133static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
3134 SmallString<64> Name("llvm.named.register.");
3135 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
3136 assert(Asm->getLabel().size() < 64-Name.size() &&
3137 "Register name too big");
3138 Name.append(Asm->getLabel());
3139 llvm::NamedMDNode *M =
3140 CGM.getModule().getOrInsertNamedMetadata(Name);
3141 if (M->getNumOperands() == 0) {
3142 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
3143 Asm->getLabel());
3144 llvm::Metadata *Ops[] = {Str};
3145 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
3146 }
3147
3148 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
3149
3150 llvm::Value *Ptr =
3151 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
3152 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
3153}
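// For example (illustrative): a file-scope declaration such as
//   register unsigned long current_sp asm("sp");
// creates the named metadata
//   !llvm.named.register.sp = !{!0}
//   !0 = !{!"sp"}
// and the resulting lvalue wraps that metadata so later loads and stores
// become llvm.read_register / llvm.write_register calls.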
3154
3155/// Determine whether we can emit a reference to \p VD from the current
3156/// context, despite not necessarily having seen an odr-use of the variable in
3157/// this context.
3159 const DeclRefExpr *E,
3160 const VarDecl *VD) {
3161 // For a variable declared in an enclosing scope, do not emit a spurious
3162 // reference even if we have a capture, as that will emit an unwarranted
3163 // reference to our capture state, and will likely generate worse code than
3164 // emitting a local copy.
3166 return false;
3167
3168 // For a local declaration declared in this function, we can always reference
3169 // it even if we don't have an odr-use.
3170 if (VD->hasLocalStorage()) {
3171 return VD->getDeclContext() ==
3172 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
3173 }
3174
3175 // For a global declaration, we can emit a reference to it if we know
3176 // for sure that we are able to emit a definition of it.
3177 VD = VD->getDefinition(CGF.getContext());
3178 if (!VD)
3179 return false;
3180
3181 // Don't emit a spurious reference if it might be to a variable that only
3182 // exists on a different device / target.
3183 // FIXME: This is unnecessarily broad. Check whether this would actually be a
3184 // cross-target reference.
3185 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
3186 CGF.getLangOpts().OpenCL) {
3187 return false;
3188 }
3189
3190 // We can emit a spurious reference only if the linkage implies that we'll
3191 // be emitting a non-interposable symbol that will be retained until link
3192 // time.
3193 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3194 case llvm::GlobalValue::ExternalLinkage:
3195 case llvm::GlobalValue::LinkOnceODRLinkage:
3196 case llvm::GlobalValue::WeakODRLinkage:
3197 case llvm::GlobalValue::InternalLinkage:
3198 case llvm::GlobalValue::PrivateLinkage:
3199 return true;
3200 default:
3201 return false;
3202 }
3203}
3204
3206 const NamedDecl *ND = E->getDecl();
3207 QualType T = E->getType();
3208
3209 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3210 "should not emit an unevaluated operand");
3211
3212 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3213 // Global named registers are accessed via intrinsics only.
3214 if (VD->getStorageClass() == SC_Register &&
3215 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3216 return EmitGlobalNamedRegister(VD, CGM);
3217
3218 // If this DeclRefExpr does not constitute an odr-use of the variable,
3219 // we're not permitted to emit a reference to it in general, and it might
3220 // not be captured if capture would be necessary for a use. Emit the
3221 // constant value directly instead.
3222 if (E->isNonOdrUse() == NOUR_Constant &&
3223 (VD->getType()->isReferenceType() ||
3224 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3225 VD->getAnyInitializer(VD);
3226 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3227 E->getLocation(), *VD->evaluateValue(), VD->getType());
3228 assert(Val && "failed to emit constant expression");
3229
3231 if (!VD->getType()->isReferenceType()) {
3232 // Spill the constant value to a global.
3233 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3234 getContext().getDeclAlign(VD));
3235 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3236 auto *PTy = llvm::PointerType::get(
3237 getLLVMContext(), getTypes().getTargetAddressSpace(VD->getType()));
3238 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3239 } else {
3240 // Should we be using the alignment of the constant pointer we emitted?
3241 CharUnits Alignment =
3242 CGM.getNaturalTypeAlignment(E->getType(),
3243 /* BaseInfo= */ nullptr,
3244 /* TBAAInfo= */ nullptr,
3245 /* forPointeeType= */ true);
3246 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3247 }
3249 }
3250
3251 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3252
3253 // Check for captured variables.
3255 VD = VD->getCanonicalDecl();
3256 if (auto *FD = LambdaCaptureFields.lookup(VD))
3257 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3258 if (CapturedStmtInfo) {
3259 auto I = LocalDeclMap.find(VD);
3260 if (I != LocalDeclMap.end()) {
3261 LValue CapLVal;
3262 if (VD->getType()->isReferenceType())
3263 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3265 else
3266 CapLVal = MakeAddrLValue(I->second, T);
3267 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3268 // in simd context.
3269 if (getLangOpts().OpenMP &&
3270 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3271 CapLVal.setNontemporal(/*Value=*/true);
3272 return CapLVal;
3273 }
3274 LValue CapLVal =
3275 EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
3276 CapturedStmtInfo->getContextValue());
3277 Address LValueAddress = CapLVal.getAddress();
3278 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3279 LValueAddress.getElementType(),
3280 getContext().getDeclAlign(VD)),
3281 CapLVal.getType(),
3283 CapLVal.getTBAAInfo());
3284 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3285 // in simd context.
3286 if (getLangOpts().OpenMP &&
3287 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3288 CapLVal.setNontemporal(/*Value=*/true);
3289 return CapLVal;
3290 }
3291
3292 assert(isa<BlockDecl>(CurCodeDecl));
3293 Address addr = GetAddrOfBlockDecl(VD);
3294 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3295 }
3296 }
3297
3298 // FIXME: We should be able to assert this for FunctionDecls as well!
3299 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3300 // those with a valid source location.
3301 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3302 !E->getLocation().isValid()) &&
3303 "Should not use decl without marking it used!");
3304
3305 if (ND->hasAttr<WeakRefAttr>()) {
3306 const auto *VD = cast<ValueDecl>(ND);
3307 ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
3308 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3309 }
3310
3311 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3312 // Check if this is a global variable.
3313 if (VD->hasLinkage() || VD->isStaticDataMember())
3314 return EmitGlobalVarDeclLValue(*this, E, VD);
3315
3316 Address addr = Address::invalid();
3317
3318 // The variable should generally be present in the local decl map.
3319 auto iter = LocalDeclMap.find(VD);
3320 if (iter != LocalDeclMap.end()) {
3321 addr = iter->second;
3322
3323 // Otherwise, it might be a static local we haven't emitted yet for
3324 // some reason; most likely, because it's in an outer function.
3325 } else if (VD->isStaticLocal()) {
3326 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3327 *VD, CGM.getLLVMLinkageVarDefinition(VD));
3328 addr = Address(
3329 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3330
3331 // No other cases for now.
3332 } else {
3333 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3334 }
3335
3336 // Handle threadlocal function locals.
3337 if (VD->getTLSKind() != VarDecl::TLS_None)
3338 addr = addr.withPointer(
3339 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3340 NotKnownNonNull);
3341
3342 // Check for OpenMP threadprivate variables.
3343 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3344 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3345 return EmitThreadPrivateVarDeclLValue(
3346 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3347 E->getExprLoc());
3348 }
3349
3350 // Drill into block byref variables.
3351 bool isBlockByref = VD->isEscapingByref();
3352 if (isBlockByref) {
3353 addr = emitBlockByrefAddress(addr, VD);
3354 }
3355
3356 // Drill into reference types.
3357 LValue LV = VD->getType()->isReferenceType() ?
3358 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3359 MakeAddrLValue(addr, T, AlignmentSource::Decl);
3360
3361 bool isLocalStorage = VD->hasLocalStorage();
3362
3363 bool NonGCable = isLocalStorage &&
3364 !VD->getType()->isReferenceType() &&
3365 !isBlockByref;
3366 if (NonGCable) {
3367 LV.getQuals().removeObjCGCAttr();
3368 LV.setNonGC(true);
3369 }
3370
3371 bool isImpreciseLifetime =
3372 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3373 if (isImpreciseLifetime)
3374 LV.setARCPreciseLifetime(ARCImpreciseLifetime);
3375 setObjCGCLValueClass(getContext(), E, LV);
3376 return LV;
3377 }
3378
3379 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3380 return EmitFunctionDeclLValue(*this, E, FD);
3381
3382 // FIXME: While we're emitting a binding from an enclosing scope, all other
3383 // DeclRefExprs we see should be implicitly treated as if they also refer to
3384 // an enclosing scope.
3385 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3386 if (E->refersToEnclosingVariableOrCapture()) {
3387 auto *FD = LambdaCaptureFields.lookup(BD);
3388 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3389 }
3390 // Suppress debug location updates when visiting the binding, since the
3391 // binding may emit instructions that would otherwise be associated with the
3392 // binding itself, rather than the expression referencing the binding. (This
3393 // leads to jumpy debug stepping behavior where the location/debugger jumps
3394 // back to the binding declaration, then back to the expression referencing
3395 // the binding.)
3397 return EmitLValue(BD->getBinding(), NotKnownNonNull);
3398 }
3399
3400 // We can form DeclRefExprs naming GUID declarations when reconstituting
3401 // non-type template parameters into expressions.
3402 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3403 return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
3404 AlignmentSource::Decl);
3405
3406 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3407 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3408 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3409
3410 if (AS != T.getAddressSpace()) {
3411 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3412 auto PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), TargetAS);
3413 auto ASC = getTargetHooks().performAddrSpaceCast(CGM, ATPO.getPointer(),
3414 AS, PtrTy);
3415 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3416 }
3417
3418 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3419 }
3420
3421 llvm_unreachable("Unhandled DeclRefExpr");
3422}
3423
3424 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
3425 // __extension__ doesn't affect lvalue-ness.
3426 if (E->getOpcode() == UO_Extension)
3427 return EmitLValue(E->getSubExpr());
3428
3429 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3430 switch (E->getOpcode()) {
3431 default: llvm_unreachable("Unknown unary operator lvalue!");
3432 case UO_Deref: {
3433 QualType T = E->getSubExpr()->getType()->getPointeeType();
3434 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3435
3436 LValueBaseInfo BaseInfo;
3437 TBAAAccessInfo TBAAInfo;
3438 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3439 &TBAAInfo);
3440 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3441 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
3442
3443 // We should not generate a __weak write barrier on an indirect reference
3444 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3445 // But we continue to generate a __strong write barrier on an indirect write
3446 // into a pointer to object.
3447 if (getLangOpts().ObjC &&
3448 getLangOpts().getGC() != LangOptions::NonGC &&
3449 LV.isObjCWeak())
3450 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
3451 return LV;
3452 }
3453 case UO_Real:
3454 case UO_Imag: {
3455 LValue LV = EmitLValue(E->getSubExpr());
3456 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3457
3458 // __real is valid on scalars. This is a faster way of testing that.
3459 // __imag can only produce an rvalue on scalars.
3460 if (E->getOpcode() == UO_Real &&
3461 !LV.getAddress().getElementType()->isStructTy()) {
3462 assert(E->getSubExpr()->getType()->isArithmeticType());
3463 return LV;
3464 }
3465
3466 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3467
3468 Address Component =
3469 (E->getOpcode() == UO_Real
3470 ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
3471 : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
3472 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3473 CGM.getTBAAInfoForSubobject(LV, T));
3474 ElemLV.getQuals().addQualifiers(LV.getQuals());
3475 return ElemLV;
3476 }
3477 case UO_PreInc:
3478 case UO_PreDec: {
3479 LValue LV = EmitLValue(E->getSubExpr());
3480 bool isInc = E->getOpcode() == UO_PreInc;
3481
3482 if (E->getType()->isAnyComplexType())
3483 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3484 else
3485 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3486 return LV;
3487 }
3488 }
3489}
3490
3491 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
3492 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
3493 E->getType(), AlignmentSource::Decl);
3494}
3495
3496 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
3497 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
3498 E->getType(), AlignmentSource::Decl);
3499}
3500
3501 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
3502 auto SL = E->getFunctionName();
3503 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3504 StringRef FnName = CurFn->getName();
3505 FnName.consume_front("\01");
3506 StringRef NameItems[] = {
3507 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3508 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3509 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3510 std::string Name = std::string(SL->getString());
3511 if (!Name.empty()) {
3512 unsigned Discriminator =
3513 CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3514 if (Discriminator)
3515 Name += "_" + Twine(Discriminator + 1).str();
3516 auto C = CGM.GetAddrOfConstantCString(Name, GVName);
3517 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3518 } else {
3519 auto C = CGM.GetAddrOfConstantCString(std::string(FnName), GVName);
3520 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3521 }
3522 }
3523 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3524 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3525}
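// Illustrative sketch (assumed naming): for
//   void foo(void) { const char *s = __func__; }
// the string "foo" is emitted into a private constant whose name is joined
// from the identifier kind and the function name, roughly "__func__.foo";
// inside a block, a "_<discriminator>" suffix keeps copies from distinct
// blocks of the same function apart.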
3526
3527/// Emit a type description suitable for use by a runtime sanitizer library. The
3528/// format of a type descriptor is
3529///
3530/// \code
3531/// { i16 TypeKind, i16 TypeInfo }
3532/// \endcode
3533///
3534/// followed by an array of i8 containing the type name with extra information
3535/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3536/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3537/// anything else.
3538 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
3539 // Only emit each type's descriptor once.
3540 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3541 return C;
3542
3543 uint16_t TypeKind = TK_Unknown;
3544 uint16_t TypeInfo = 0;
3545 bool IsBitInt = false;
3546
3547 if (T->isIntegerType()) {
3548 TypeKind = TK_Integer;
3549 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3550 (T->isSignedIntegerType() ? 1 : 0);
3551 // Follow the suggestion from the discussion of issue 64100: write the
3552 // exact number of bits in TypeName after '\0',
3553 // making it <diagnostic-like type name>.'\0'.<32-bit width>.
3554 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3555 // Do a sanity check, as we are using a 32-bit type to store the bit length.
3556 assert(getContext().getTypeSize(T) > 0 &&
3557 " non positive amount of bits in __BitInt type");
3558 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3559 " too many bits in __BitInt type");
3560
3561 // Redefine TypeKind with the actual __BitInt type if we have signed
3562 // BitInt.
3563 TypeKind = TK_BitInt;
3564 IsBitInt = true;
3565 }
3566 } else if (T->isFloatingType()) {
3567 TypeKind = TK_Float;
3568 TypeInfo = getContext().getTypeSize(T);
3569 }
3570
3571 // Format the type name as if for a diagnostic, including quotes and
3572 // optionally an 'aka'.
3573 SmallString<32> Buffer;
3574 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
3575 (intptr_t)T.getAsOpaquePtr(), StringRef(),
3576 StringRef(), {}, Buffer, {});
3577
3578 if (IsBitInt) {
3579 // The structure is: a 0 to end the string, a 32-bit unsigned integer in
3580 // target endianness, then a zero.
3581 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3582 const auto *EIT = T->castAs<BitIntType>();
3583 uint32_t Bits = EIT->getNumBits();
3584 llvm::support::endian::write32(S + 1, Bits,
3585 getTarget().isBigEndian()
3586 ? llvm::endianness::big
3587 : llvm::endianness::little);
3588 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3589 Buffer.append(Str);
3590 }
3591
3592 llvm::Constant *Components[] = {
3593 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3594 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3595 };
3596 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3597
3598 auto *GV = new llvm::GlobalVariable(
3599 CGM.getModule(), Descriptor->getType(),
3600 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3601 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3602 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3603
3604 // Remember the descriptor for this type.
3605 CGM.setTypeDescriptorInMap(T, GV);
3606
3607 return GV;
3608}
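// Worked example (illustrative): for a signed 32-bit 'int' the descriptor is
// roughly
//   { i16 0 /* TK_Integer */, i16 11 /* (log2(32) << 1) | 1 for signed */,
//     c"'int'\00" }
// and for a signed _BitInt the exact bit width is additionally appended after
// the terminating NUL as a 32-bit integer in target endianness.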
3609
3610llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3611 llvm::Type *TargetTy = IntPtrTy;
3612
3613 if (V->getType() == TargetTy)
3614 return V;
3615
3616 // Floating-point types which fit into intptr_t are bitcast to integers
3617 // and then passed directly (after zero-extension, if necessary).
3618 if (V->getType()->isFloatingPointTy()) {
3619 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3620 if (Bits <= TargetTy->getIntegerBitWidth())
3621 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3622 Bits));
3623 }
3624
3625 // Integers which fit in intptr_t are zero-extended and passed directly.
3626 if (V->getType()->isIntegerTy() &&
3627 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3628 return Builder.CreateZExt(V, TargetTy);
3629
3630 // Pointers are passed directly, everything else is passed by address.
3631 if (!V->getType()->isPointerTy()) {
3632 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3633 Builder.CreateStore(V, Ptr);
3634 V = Ptr.getPointer();
3635 }
3636 return Builder.CreatePtrToInt(V, TargetTy);
3637}
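// For example (illustrative): a 'float' argument is bitcast to i32 and then
// zero-extended to the target's intptr_t; a value that is neither integer,
// float, nor pointer is spilled to a default-aligned stack temporary and the
// temporary's address is passed instead.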
3638
3639/// Emit a representation of a SourceLocation for passing to a handler
3640/// in a sanitizer runtime library. The format for this data is:
3641/// \code
3642/// struct SourceLocation {
3643/// const char *Filename;
3644/// int32_t Line, Column;
3645/// };
3646/// \endcode
3647/// For an invalid SourceLocation, the Filename pointer is null.
3648 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3649 llvm::Constant *Filename;
3650 int Line, Column;
3651
3652 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3653 if (PLoc.isValid()) {
3654 StringRef FilenameString = PLoc.getFilename();
3655
3656 int PathComponentsToStrip =
3657 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3658 if (PathComponentsToStrip < 0) {
3659 assert(PathComponentsToStrip != INT_MIN);
3660 int PathComponentsToKeep = -PathComponentsToStrip;
3661 auto I = llvm::sys::path::rbegin(FilenameString);
3662 auto E = llvm::sys::path::rend(FilenameString);
3663 while (I != E && --PathComponentsToKeep)
3664 ++I;
3665
3666 FilenameString = FilenameString.substr(I - E);
3667 } else if (PathComponentsToStrip > 0) {
3668 auto I = llvm::sys::path::begin(FilenameString);
3669 auto E = llvm::sys::path::end(FilenameString);
3670 while (I != E && PathComponentsToStrip--)
3671 ++I;
3672
3673 if (I != E)
3674 FilenameString =
3675 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3676 else
3677 FilenameString = llvm::sys::path::filename(FilenameString);
3678 }
3679
3680 auto FilenameGV =
3681 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3682 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3683 cast<llvm::GlobalVariable>(
3684 FilenameGV.getPointer()->stripPointerCasts()));
3685 Filename = FilenameGV.getPointer();
3686 Line = PLoc.getLine();
3687 Column = PLoc.getColumn();
3688 } else {
3689 Filename = llvm::Constant::getNullValue(Int8PtrTy);
3690 Line = Column = 0;
3691 }
3692
3693 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3694 Builder.getInt32(Column)};
3695
3696 return llvm::ConstantStruct::getAnon(Data);
3697}
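// Path-stripping example (illustrative, assuming the usual driver spelling
// -fsanitize-undefined-strip-path-components=N): for "/usr/local/src/foo.c",
// N = 2 drops the two leading components and keeps "local/src/foo.c", while
// N = -2 keeps only the last two components, "src/foo.c".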
3698
3699namespace {
3700/// Specify under what conditions this check can be recovered
3701enum class CheckRecoverableKind {
3702 /// Always terminate program execution if this check fails.
3703 Unrecoverable,
3704 /// Check supports recovering, runtime has both fatal (noreturn) and
3705 /// non-fatal handlers for this check.
3706 Recoverable,
3707 /// Runtime conditionally aborts, so we always need to support recovery.
3708 AlwaysRecoverable
3709 };
3710}
3711
3712static CheckRecoverableKind
3713 getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal) {
3714 if (Ordinal == SanitizerKind::SO_Vptr)
3715 return CheckRecoverableKind::AlwaysRecoverable;
3716 else if (Ordinal == SanitizerKind::SO_Return ||
3717 Ordinal == SanitizerKind::SO_Unreachable)
3718 return CheckRecoverableKind::Unrecoverable;
3719 else
3720 return CheckRecoverableKind::Recoverable;
3721}
3722
3723namespace {
3724struct SanitizerHandlerInfo {
3725 char const *const Name;
3726 unsigned Version;
3727};
3728}
3729
3730const SanitizerHandlerInfo SanitizerHandlers[] = {
3731#define SANITIZER_CHECK(Enum, Name, Version, Msg) {#Name, Version},
3732 LIST_SANITIZER_CHECKS
3733#undef SANITIZER_CHECK
3734};
3735
3736 static void emitCheckHandlerCall(CodeGenFunction &CGF,
3737 llvm::FunctionType *FnType,
3738 ArrayRef<llvm::Value *> FnArgs,
3739 SanitizerHandler CheckHandler,
3740 CheckRecoverableKind RecoverKind, bool IsFatal,
3741 llvm::BasicBlock *ContBB, bool NoMerge) {
3742 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3743 std::optional<ApplyDebugLocation> DL;
3744 if (!CGF.Builder.getCurrentDebugLocation()) {
3745 // Ensure that the call has at least an artificial debug location.
3746 DL.emplace(CGF, SourceLocation());
3747 }
3748 bool NeedsAbortSuffix =
3749 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3750 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3751 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3752 const StringRef CheckName = CheckInfo.Name;
3753 std::string FnName = "__ubsan_handle_" + CheckName.str();
3754 if (CheckInfo.Version && !MinimalRuntime)
3755 FnName += "_v" + llvm::utostr(CheckInfo.Version);
3756 if (MinimalRuntime)
3757 FnName += "_minimal";
3758 if (NeedsAbortSuffix)
3759 FnName += "_abort";
3760 bool MayReturn =
3761 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3762
3763 llvm::AttrBuilder B(CGF.getLLVMContext());
3764 if (!MayReturn) {
3765 B.addAttribute(llvm::Attribute::NoReturn)
3766 .addAttribute(llvm::Attribute::NoUnwind);
3767 }
3768 B.addUWTableAttr(llvm::UWTableKind::Default);
3769
3770 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3771 FnType, FnName,
3772 llvm::AttributeList::get(CGF.getLLVMContext(),
3773 llvm::AttributeList::FunctionIndex, B),
3774 /*Local=*/true);
3775 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3776 NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel ||
3777 (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3778 if (NoMerge)
3779 HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
3780 if (!MayReturn) {
3781 HandlerCall->setDoesNotReturn();
3782 CGF.Builder.CreateUnreachable();
3783 } else {
3784 CGF.Builder.CreateBr(ContBB);
3785 }
3786}
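// Naming example (illustrative): for the version-1 "type_mismatch" handler, a
// recoverable check calls __ubsan_handle_type_mismatch_v1, a fatal one calls
// __ubsan_handle_type_mismatch_v1_abort, and under the minimal runtime the
// version suffix is dropped in favor of "_minimal" / "_minimal_abort".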
3787
3788 void CodeGenFunction::EmitCheck(
3789 ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
3790 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3791 ArrayRef<llvm::Value *> DynamicArgs, const TrapReason *TR) {
3792 assert(IsSanitizerScope);
3793 assert(Checked.size() > 0);
3794 assert(CheckHandler >= 0 &&
3795 size_t(CheckHandler) < std::size(SanitizerHandlers));
3796 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3797
3798 llvm::Value *FatalCond = nullptr;
3799 llvm::Value *RecoverableCond = nullptr;
3800 llvm::Value *TrapCond = nullptr;
3801 bool NoMerge = false;
3802 // Expand checks into:
3803 // (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ...
3804 // We need separate allow_ubsan_check intrinsics because they have separately
3805 // specified cutoffs.
3806 // This expression looks expensive but will be simplified after
3807 // LowerAllowCheckPass.
3808 for (auto &[Check, Ord] : Checked) {
3809 llvm::Value *GuardedCheck = Check;
3810 if (ClSanitizeGuardChecks ||
3811 (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] > 0)) {
3812 llvm::Value *Allow = Builder.CreateCall(
3813 CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3814 llvm::ConstantInt::get(CGM.Int8Ty, Ord));
3815 GuardedCheck = Builder.CreateOr(Check, Builder.CreateNot(Allow));
3816 }
3817
3818 // -fsanitize-trap= overrides -fsanitize-recover=.
3819 llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
3820 : CGM.getCodeGenOpts().SanitizeRecover.has(Ord)
3821 ? RecoverableCond
3822 : FatalCond;
3823 Cond = Cond ? Builder.CreateAnd(Cond, GuardedCheck) : GuardedCheck;
3824
3825 if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Ord))
3826 NoMerge = true;
3827 }
3828
3829 if (TrapCond)
3830 EmitTrapCheck(TrapCond, CheckHandler, NoMerge, TR);
3831 if (!FatalCond && !RecoverableCond)
3832 return;
3833
3834 llvm::Value *JointCond;
3835 if (FatalCond && RecoverableCond)
3836 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3837 else
3838 JointCond = FatalCond ? FatalCond : RecoverableCond;
3839 assert(JointCond);
3840
3841 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3842 assert(SanOpts.has(Checked[0].second));
3843#ifndef NDEBUG
3844 for (int i = 1, n = Checked.size(); i < n; ++i) {
3845 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3846 "All recoverable kinds in a single check must be same!");
3847 assert(SanOpts.has(Checked[i].second));
3848 }
3849#endif
3850
3851 llvm::BasicBlock *Cont = createBasicBlock("cont");
3852 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3853 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3854 // Give a hint that we very much don't expect to execute the handler.
3855 llvm::MDBuilder MDHelper(getLLVMContext());
3856 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3857 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3858 EmitBlock(Handlers);
3859
3860 // Clear arguments for the MinimalRuntime handler.
3861 if (CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3862 switch (CheckHandler) {
3863 case SanitizerHandler::TypeMismatch:
3864 // Pass value pointer only. It adds minimal overhead.
3865 StaticArgs = {};
3866 assert(DynamicArgs.size() == 1);
3867 break;
3868 default:
3869 // No arguments for other checks.
3870 StaticArgs = {};
3871 DynamicArgs = {};
3872 break;
3873 }
3874 }
3875
3876 // Handler functions take an i8* pointing to the (handler-specific) static
3877 // information block, followed by a sequence of intptr_t arguments
3878 // representing operand values.
3879 SmallVector<llvm::Value *, 4> Args;
3880 SmallVector<llvm::Type *, 4> ArgTypes;
3881
3882 Args.reserve(DynamicArgs.size() + 1);
3883 ArgTypes.reserve(DynamicArgs.size() + 1);
3884
3885 // Emit handler arguments and create handler function type.
3886 if (!StaticArgs.empty()) {
3887 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3888 auto *InfoPtr = new llvm::GlobalVariable(
3889 CGM.getModule(), Info->getType(),
3890 // Non-constant global is used in a handler to deduplicate reports.
3891 // TODO: change deduplication logic and make it constant.
3892 /*isConstant=*/false, llvm::GlobalVariable::PrivateLinkage, Info, "",
3893 nullptr, llvm::GlobalVariable::NotThreadLocal,
3894 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3895 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3896 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3897 Args.push_back(InfoPtr);
3898 ArgTypes.push_back(Args.back()->getType());
3899 }
3900
3901 for (llvm::Value *DynamicArg : DynamicArgs) {
3902 Args.push_back(EmitCheckValue(DynamicArg));
3903 ArgTypes.push_back(IntPtrTy);
3904 }
3905
3906 llvm::FunctionType *FnType =
3907 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3908
3909 if (!FatalCond || !RecoverableCond) {
3910 // Simple case: we need to generate a single handler call, either
3911 // fatal, or non-fatal.
3912 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3913 (FatalCond != nullptr), Cont, NoMerge);
3914 } else {
3915 // Emit two handler calls: the first for the set of unrecoverable checks,
3916 // the second for the recoverable ones.
3917 llvm::BasicBlock *NonFatalHandlerBB =
3918 createBasicBlock("non_fatal." + CheckName);
3919 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3920 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3921 EmitBlock(FatalHandlerBB);
3922 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3923 NonFatalHandlerBB, NoMerge);
3924 EmitBlock(NonFatalHandlerBB);
3925 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3926 Cont, NoMerge);
3927 }
3928
3929 EmitBlock(Cont);
3930}
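// Shape of the emitted control flow (illustrative sketch):
//   %ok = and i1 %check1, %check2
//   br i1 %ok, label %cont, label %handler.<name>, !prof !<likely-cont>
// handler.<name>:
//   call void @__ubsan_handle_<name>(ptr @static_data, i64 %dyn0, ...)
//   br label %cont            ; or 'unreachable' for fatal, non-returning calls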
3931
3932 void CodeGenFunction::EmitCfiSlowPathCheck(
3933 SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
3934 llvm::ConstantInt *TypeId, llvm::Value *Ptr,
3935 ArrayRef<llvm::Constant *> StaticArgs) {
3936 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3937
3938 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3939 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3940
3941 llvm::MDBuilder MDHelper(getLLVMContext());
3942 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3943 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3944
3945 EmitBlock(CheckBB);
3946
3947 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);
3948
3949 llvm::CallInst *CheckCall;
3950 llvm::FunctionCallee SlowPathFn;
3951 if (WithDiag) {
3952 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3953 auto *InfoPtr =
3954 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3955 llvm::GlobalVariable::PrivateLinkage, Info);
3956 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3957 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3958
3959 SlowPathFn = CGM.getModule().getOrInsertFunction(
3960 "__cfi_slowpath_diag",
3961 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3962 false));
3963 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3964 } else {
3965 SlowPathFn = CGM.getModule().getOrInsertFunction(
3966 "__cfi_slowpath",
3967 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3968 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3969 }
3970
3971 CGM.setDSOLocal(
3972 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3973 CheckCall->setDoesNotThrow();
3974
3975 EmitBlock(Cont);
3976}
3977
3978// Emit a stub for __cfi_check function so that the linker knows about this
3979// symbol in LTO mode.
3980 void CodeGenFunction::EmitCfiCheckStub() {
3981 llvm::Module *M = &CGM.getModule();
3982 ASTContext &C = getContext();
3983 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
3984
3985 FunctionArgList FnArgs;
3986 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
3987 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
3988 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
3989 ImplicitParamKind::Other);
3990 FnArgs.push_back(&ArgCallsiteTypeId);
3991 FnArgs.push_back(&ArgAddr);
3992 FnArgs.push_back(&ArgCFICheckFailData);
3993 const CGFunctionInfo &FI =
3994 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
3995
3996 llvm::Function *F = llvm::Function::Create(
3997 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
3998 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3999 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
4000 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
4001 F->setAlignment(llvm::Align(4096));
4002 CGM.setDSOLocal(F);
4003
4004 llvm::LLVMContext &Ctx = M->getContext();
4005 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
4006 // CrossDSOCFI pass is not executed if there is no executable code.
4007 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
4008 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
4009 llvm::ReturnInst::Create(Ctx, nullptr, BB);
4010}
4011
4012// This function is basically a switch over the CFI failure kind, which is
4013// extracted from CFICheckFailData (1st function argument). Each case is either
4014// llvm.trap or a call to one of the two runtime handlers, based on
4015// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
4016// failure kind) traps, but this should really never happen. CFICheckFailData
4017// can be nullptr if the calling module has -fsanitize-trap behavior for this
4018// check kind; in this case __cfi_check_fail traps as well.
4019 void CodeGenFunction::EmitCfiCheckFail() {
4020 auto CheckHandler = SanitizerHandler::CFICheckFail;
4021 // TODO: the SanitizerKind is not yet determined for this check (and might
4022 // not even be available, if Data == nullptr). However, we still want to
4023 // annotate the instrumentation. We approximate this by using all the CFI
4024 // kinds.
4025 SanitizerDebugLocation SanScope(
4026 this,
4027 {SanitizerKind::SO_CFIVCall, SanitizerKind::SO_CFINVCall,
4028 SanitizerKind::SO_CFIDerivedCast, SanitizerKind::SO_CFIUnrelatedCast,
4029 SanitizerKind::SO_CFIICall},
4030 CheckHandler);
4031 FunctionArgList Args;
4032 ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
4033 ImplicitParamKind::Other);
4034 ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
4035 ImplicitParamKind::Other);
4036 Args.push_back(&ArgData);
4037 Args.push_back(&ArgAddr);
4038
4039 const CGFunctionInfo &FI =
4040 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
4041
4042 llvm::Function *F = llvm::Function::Create(
4043 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
4044 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
4045
4046 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
4047 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
4048 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
4049
4050 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
4051 SourceLocation());
4052
4054
4055 // This function is not affected by NoSanitizeList. This function does
4056 // not have a source location, but "src:*" would still apply. Revert any
4057 // changes to SanOpts made in StartFunction.
4058 SanOpts = CGM.getLangOpts().Sanitize;
4059
4060 llvm::Value *Data =
4061 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
4062 CGM.getContext().VoidPtrTy, ArgData.getLocation());
4063 llvm::Value *Addr =
4064 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
4065 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
4066
4067 // Data == nullptr means the calling module has trap behaviour for this check.
4068 llvm::Value *DataIsNotNullPtr =
4069 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
4070 // TODO: since there is no data, we don't know the CheckKind, and therefore
4071 // cannot inspect CGM.getCodeGenOpts().SanitizeMergeHandlers. We default to
4072 // NoMerge = false. Users can disable merging by disabling optimization.
4073 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail,
4074 /*NoMerge=*/false);
4075
4076 llvm::StructType *SourceLocationTy =
4077 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
4078 llvm::StructType *CfiCheckFailDataTy =
4079 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
4080
4081 llvm::Value *V = Builder.CreateConstGEP2_32(
4082 CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
4083
4084 Address CheckKindAddr(V, Int8Ty, getIntAlign());
4085 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
4086
4087 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
4088 CGM.getLLVMContext(),
4089 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
4090 llvm::Value *ValidVtable = Builder.CreateZExt(
4091 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
4092 {Addr, AllVtables}),
4093 IntPtrTy);
4094
4095 const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
4096 {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
4097 {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
4098 {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
4099 {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
4100 {CFITCK_ICall, SanitizerKind::SO_CFIICall}};
4101
4102 for (auto CheckKindOrdinalPair : CheckKinds) {
4103 int Kind = CheckKindOrdinalPair.first;
4104 SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
4105
4106 // TODO: we could apply SanitizerAnnotateDebugInfo(Ordinal) instead of
4107 // relying on the SanitizerScope with all CFI ordinals
4108
4109 llvm::Value *Cond =
4110 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
4111 if (CGM.getLangOpts().Sanitize.has(Ordinal))
4112 EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
4113 {}, {Data, Addr, ValidVtable});
4114 else
4115 // TODO: we can't rely on CGM.getCodeGenOpts().SanitizeMergeHandlers.
4116 // Although the compiler allows SanitizeMergeHandlers to be set
4117 // independently of CGM.getLangOpts().Sanitize, Driver/SanitizerArgs.cpp
4118 // requires that SanitizeMergeHandlers is a subset of Sanitize.
4119 EmitTrapCheck(Cond, CheckHandler, /*NoMerge=*/false);
4120 }
4121
4122 FinishFunction();
4123 // The only reference to this function will be created during LTO link.
4124 // Make sure it survives until then.
4125 CGM.addUsedGlobal(F);
4126}
4127
4128 void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
4129 if (SanOpts.has(SanitizerKind::Unreachable)) {
4130 auto CheckOrdinal = SanitizerKind::SO_Unreachable;
4131 auto CheckHandler = SanitizerHandler::BuiltinUnreachable;
4132 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4133 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
4134 CheckOrdinal),
4135 CheckHandler, EmitCheckSourceLocation(Loc), {});
4136 }
4137 Builder.CreateUnreachable();
4138}
4139
4140void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
4141 SanitizerHandler CheckHandlerID,
4142 bool NoMerge, const TrapReason *TR) {
4143 llvm::BasicBlock *Cont = createBasicBlock("cont");
4144
4145 // If we're optimizing, collapse all calls to trap down to just one per
4146 // check-type per function to save on code size.
4147 if ((int)TrapBBs.size() <= CheckHandlerID)
4148 TrapBBs.resize(CheckHandlerID + 1);
4149
4150 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
4151
4152 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
4153 llvm::StringRef TrapMessage;
4154 llvm::StringRef TrapCategory;
4155 auto DebugTrapReasonKind = CGM.getCodeGenOpts().getSanitizeDebugTrapReasons();
4156 if (TR && !TR->isEmpty() &&
4157 DebugTrapReasonKind ==
4158 CodeGenOptions::SanitizeDebugTrapReasonKind::Detailed) {
4159 TrapMessage = TR->getMessage();
4160 TrapCategory = TR->getCategory();
4161 } else {
4162 TrapMessage = GetUBSanTrapForHandler(CheckHandlerID);
4163 TrapCategory = "Undefined Behavior Sanitizer";
4164 }
4165
4166 if (getDebugInfo() && !TrapMessage.empty() &&
4167 DebugTrapReasonKind !=
4168 CodeGenOptions::SanitizeDebugTrapReasonKind::None &&
4169 TrapLocation) {
4170 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
4171 TrapLocation, TrapCategory, TrapMessage);
4172 }
4173
4174 NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
4175 (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
4176
4177 llvm::MDBuilder MDHelper(getLLVMContext());
4178 if (TrapBB && !NoMerge) {
4179 auto Call = TrapBB->begin();
4180 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
4181
4182 Call->applyMergedLocation(Call->getDebugLoc(), TrapLocation);
4183
4184 Builder.CreateCondBr(Checked, Cont, TrapBB,
4185 MDHelper.createLikelyBranchWeights());
4186 } else {
4187 TrapBB = createBasicBlock("trap");
4188 Builder.CreateCondBr(Checked, Cont, TrapBB,
4189 MDHelper.createLikelyBranchWeights());
4190 EmitBlock(TrapBB);
4191
4192 ApplyDebugLocation applyTrapDI(*this, TrapLocation);
4193
4194 llvm::CallInst *TrapCall =
4195 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
4196 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
4197
4198 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4199 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4200 CGM.getCodeGenOpts().TrapFuncName);
4201 TrapCall->addFnAttr(A);
4202 }
4203 if (NoMerge)
4204 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4205 TrapCall->setDoesNotReturn();
4206 TrapCall->setDoesNotThrow();
4207 Builder.CreateUnreachable();
4208 }
4209
4210 EmitBlock(Cont);
4211}
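// Emitted pattern (illustrative), with the trap block shared per handler ID
// when merging is allowed:
//   br i1 %ok, label %cont, label %trap
// trap:
//   call void @llvm.ubsantrap(i8 <CheckHandlerID>)   ; plus "trap-func-name"
//   unreachable                                      ; if one was requested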
4212
4213llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
4214 llvm::CallInst *TrapCall =
4215 Builder.CreateCall(CGM.getIntrinsic(IntrID));
4216
4217 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4218 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4219 CGM.getCodeGenOpts().TrapFuncName);
4220 TrapCall->addFnAttr(A);
4221 }
4222
4224 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4225 return TrapCall;
4226}
4227
4228 Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
4229 LValueBaseInfo *BaseInfo,
4230 TBAAAccessInfo *TBAAInfo) {
4231 assert(E->getType()->isArrayType() &&
4232 "Array to pointer decay must have array source type!");
4233
4234 // Expressions of array type can't be bitfields or vector elements.
4235 LValue LV = EmitLValue(E);
4236 Address Addr = LV.getAddress();
4237
4238 // If the array type was an incomplete type, we need to make sure
4239 // the decay ends up being the right type.
4240 llvm::Type *NewTy = ConvertType(E->getType());
4241 Addr = Addr.withElementType(NewTy);
4242
4243 // Note that VLA pointers are always decayed, so we don't need to do
4244 // anything here.
4245 if (!E->getType()->isVariableArrayType()) {
4246 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4247 "Expected pointer to array");
4248 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4249 }
4250
4251 // The result of this decay conversion points to an array element within the
4252 // base lvalue. However, since TBAA currently does not support representing
4253 // accesses to elements of member arrays, we conservatively represent accesses
4254 // to the pointee object as if it had no base lvalue specified.
4255 // TODO: Support TBAA for member arrays.
4256 QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
4257 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
4258 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
4259
4260 return Addr.withElementType(ConvertTypeForMem(EltType));
4261}
4262
4263/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4264/// array to pointer, return the array subexpression.
4265static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4266 // If this isn't just an array->pointer decay, bail out.
4267 const auto *CE = dyn_cast<CastExpr>(E);
4268 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4269 return nullptr;
4270
4271 // If this is a decay from a variable width array, bail out.
4272 const Expr *SubExpr = CE->getSubExpr();
4273 if (SubExpr->getType()->isVariableArrayType())
4274 return nullptr;
4275
4276 return SubExpr;
4277}
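// For example (illustrative): given 'int A[10];', the subscript 'A[i]' has a
// base that is an ArrayToPointerDecay implicit cast of the DeclRefExpr 'A';
// this helper returns that DeclRefExpr so the caller can emit a single
// "gep A, 0, i" instead of a decay GEP followed by a second GEP.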
4278
4279 static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
4280 llvm::Type *elemType,
4281 llvm::Value *ptr,
4282 ArrayRef<llvm::Value*> indices,
4283 bool inbounds,
4284 bool signedIndices,
4285 SourceLocation loc,
4286 const llvm::Twine &name = "arrayidx") {
4287 if (inbounds) {
4288 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
4289 CodeGenFunction::NotSubtraction, loc,
4290 name);
4291 } else {
4292 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
4293 }
4294}
4295
4296 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4297 ArrayRef<llvm::Value *> indices,
4298 llvm::Type *elementType, bool inbounds,
4299 bool signedIndices, SourceLocation loc,
4300 CharUnits align,
4301 const llvm::Twine &name = "arrayidx") {
4302 if (inbounds) {
4303 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
4304 CodeGenFunction::NotSubtraction, loc,
4305 align, name);
4306 } else {
4307 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
4308 }
4309}
4310
4311 static QualType getFixedSizeElementType(const ASTContext &ctx,
4312 const VariableArrayType *vla) {
4313 QualType eltType;
4314 do {
4315 eltType = vla->getElementType();
4316 } while ((vla = ctx.getAsVariableArrayType(eltType)));
4317 return eltType;
4318}
4319
4320 static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
4321 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4322}
4323
4324static bool hasBPFPreserveStaticOffset(const Expr *E) {
4325 if (!E)
4326 return false;
4327 QualType PointeeType = E->getType()->getPointeeType();
4328 if (PointeeType.isNull())
4329 return false;
4330 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4331 return hasBPFPreserveStaticOffset(BaseDecl);
4332 return false;
4333}
4334
4335// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
4336 static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
4337 Address &Addr) {
4338 if (!CGF.getTarget().getTriple().isBPF())
4339 return Addr;
4340
4341 llvm::Function *Fn =
4342 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4343 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4344 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4345}
4346
4347/// Given an array base, check whether its member access belongs to a record
4348/// with preserve_access_index attribute or not.
4349static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4350 if (!ArrayBase || !CGF.getDebugInfo())
4351 return false;
4352
4353 // Only support base as either a MemberExpr or DeclRefExpr.
4354 // DeclRefExpr to cover cases like:
4355 // struct s { int a; int b[10]; };
4356 // struct s *p;
4357 // p[1].a
4358 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4359 // p->b[5] is a MemberExpr example.
4360 const Expr *E = ArrayBase->IgnoreImpCasts();
4361 if (const auto *ME = dyn_cast<MemberExpr>(E))
4362 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4363
4364 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4365 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4366 if (!VarDef)
4367 return false;
4368
4369 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4370 if (!PtrT)
4371 return false;
4372
4373 const auto *PointeeT = PtrT->getPointeeType()
4374 ->getUnqualifiedDesugaredType();
4375 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4376 return RecT->getOriginalDecl()
4377 ->getMostRecentDecl()
4378 ->hasAttr<BPFPreserveAccessIndexAttr>();
4379 return false;
4380 }
4381
4382 return false;
4383}
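// Illustrative example (assumed attribute spelling): for BPF CO-RE, a record
// such as
//   struct s { int a; int b[10]; } __attribute__((preserve_access_index));
// makes an access like p->b[5] go through the preserve.*.access.index
// intrinsics so the member and array offsets can be relocated at load time.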
4384
4385 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4386 ArrayRef<llvm::Value *> indices,
4387 QualType eltType, bool inbounds,
4388 bool signedIndices, SourceLocation loc,
4389 QualType *arrayType = nullptr,
4390 const Expr *Base = nullptr,
4391 const llvm::Twine &name = "arrayidx") {
4392 // All the indices except the last must be zero.
4393#ifndef NDEBUG
4394 for (auto *idx : indices.drop_back())
4395 assert(isa<llvm::ConstantInt>(idx) &&
4396 cast<llvm::ConstantInt>(idx)->isZero());
4397#endif
4398
4399 // Determine the element size of the statically-sized base. This is
4400 // the thing that the indices are expressed in terms of.
4401 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4402 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4403 }
4404
4405 // We can use that to compute the best alignment of the element.
4406 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4407 CharUnits eltAlign =
4408 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4409
4410 if (hasBPFPreserveStaticOffset(Base))
4411 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4412
4413 llvm::Value *eltPtr;
4414 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4415 if (!LastIndex ||
4417 addr = emitArraySubscriptGEP(CGF, addr, indices,
4418 CGF.ConvertTypeForMem(eltType), inbounds,
4419 signedIndices, loc, eltAlign, name);
4420 return addr;
4421 } else {
4422 // Remember the original array subscript for bpf target
4423 unsigned idx = LastIndex->getZExtValue();
4424 llvm::DIType *DbgInfo = nullptr;
4425 if (arrayType)
4426 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4427 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4428 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4429 idx, DbgInfo);
4430 }
4431
4432 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4433}
4434
4435namespace {
4436
4437/// StructFieldAccess is a simple visitor class to grab the first l-value to
4438/// r-value cast Expr.
4439struct StructFieldAccess
4440 : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
4441 const Expr *VisitCastExpr(const CastExpr *E) {
4442 if (E->getCastKind() == CK_LValueToRValue)
4443 return E;
4444 return Visit(E->getSubExpr());
4445 }
4446 const Expr *VisitParenExpr(const ParenExpr *E) {
4447 return Visit(E->getSubExpr());
4448 }
4449};
4450
4451} // end anonymous namespace
4452
4453/// The offset of a field from the beginning of the record.
4454 static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
4455 const FieldDecl *Field, int64_t &Offset) {
4456 ASTContext &Ctx = CGF.getContext();
4457 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4458 unsigned FieldNo = 0;
4459
4460 for (const FieldDecl *FD : RD->fields()) {
4461 if (FD == Field) {
4462 Offset += Layout.getFieldOffset(FieldNo);
4463 return true;
4464 }
4465
4466 QualType Ty = FD->getType();
4467 if (Ty->isRecordType())
4468 if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
4469 Offset += Layout.getFieldOffset(FieldNo);
4470 return true;
4471 }
4472
4473 if (!RD->isUnion())
4474 ++FieldNo;
4475 }
4476
4477 return false;
4478}
4479
4480/// Returns the relative offset difference between \p FD1 and \p FD2.
4481/// \code
4482/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4483/// \endcode
4484/// Both fields must be within the same struct.
4485static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4486 const FieldDecl *FD1,
4487 const FieldDecl *FD2) {
4488 const RecordDecl *FD1OuterRec =
4489 FD1->getParent()->getOuterLexicalRecordContext();
4490 const RecordDecl *FD2OuterRec =
4491 FD2->getParent()->getOuterLexicalRecordContext();
4492
4493 if (FD1OuterRec != FD2OuterRec)
4494 // Fields must be within the same RecordDecl.
4495 return std::optional<int64_t>();
4496
4497 int64_t FD1Offset = 0;
4498 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4499 return std::optional<int64_t>();
4500
4501 int64_t FD2Offset = 0;
4502 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4503 return std::optional<int64_t>();
4504
4505 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4506}
4507
4508/// EmitCountedByBoundsChecking - If the array being accessed has a "counted_by"
4509/// attribute, generate bounds checking code. The "count" field is at the top
4510 /// level of the struct, or in an anonymous struct that is also at the top level.
4511/// Future expansions may allow the "count" to reside at any place in the
4512/// struct, but the value of "counted_by" will be a "simple" path to the count,
4513/// i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4514/// similar to emit the correct GEP.
4515 void CodeGenFunction::EmitCountedByBoundsChecking(
4516 const Expr *E, llvm::Value *Idx, Address Addr, QualType IdxTy,
4517 QualType ArrayTy, bool Accessed, bool FlexibleArray) {
4518 const auto *ME = dyn_cast<MemberExpr>(E->IgnoreImpCasts());
4519 if (!ME || !ME->getMemberDecl()->getType()->isCountAttributedType())
4520 return;
4521
4522 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4523 getLangOpts().getStrictFlexArraysLevel();
4524 if (FlexibleArray &&
4525 !ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel))
4526 return;
4527
4528 const FieldDecl *FD = cast<FieldDecl>(ME->getMemberDecl());
4529 const FieldDecl *CountFD = FD->findCountedByField();
4530 if (!CountFD)
4531 return;
4532
4533 if (std::optional<int64_t> Diff =
4534 getOffsetDifferenceInBits(*this, CountFD, FD)) {
4535 if (!Addr.isValid()) {
4536 // An invalid Address indicates we're checking a pointer array access.
4537 // Emit the checked L-Value here.
4538 LValue LV = EmitCheckedLValue(E, TCK_MemberAccess);
4539 Addr = LV.getAddress();
4540 }
4541
4542 // FIXME: The 'static_cast' is necessary, otherwise the result turns into a
4543 // uint64_t, which messes things up if we have a negative offset difference.
4544 Diff = *Diff / static_cast<int64_t>(CGM.getContext().getCharWidth());
4545
4546 // Create a GEP with the byte offset between the counted object and the
4547 // count and use that to load the count value.
4548 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Int8PtrTy, Int8Ty);
4549
4550 llvm::Type *CountTy = ConvertType(CountFD->getType());
4551 llvm::Value *Res =
4552 Builder.CreateInBoundsGEP(Int8Ty, Addr.emitRawPointer(*this),
4553 Builder.getInt32(*Diff), ".counted_by.gep");
4554 Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4555 ".counted_by.load");
4556
4557 // Now emit the bounds checking.
4558 EmitBoundsCheckImpl(E, Res, Idx, IdxTy, ArrayTy, Accessed);
4559 }
4560}
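// Usage example (illustrative): with
//   struct buf { int count; char data[] __attribute__((counted_by(count))); };
// an access 'p->data[i]' under -fsanitize=array-bounds loads 'p->count'
// through the byte-offset GEP built above and bounds-checks 'i' against it.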
4561
4562 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
4563 bool Accessed) {
4564 // The index must always be an integer, which is not an aggregate. Emit it
4565 // in lexical order (this complexity is, sadly, required by C++17).
4566 llvm::Value *IdxPre =
4567 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4568 bool SignedIndices = false;
4569 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4570 auto *Idx = IdxPre;
4571 if (E->getLHS() != E->getIdx()) {
4572 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4573 Idx = EmitScalarExpr(E->getIdx());
4574 }
4575
4576 QualType IdxTy = E->getIdx()->getType();
4577 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4578 SignedIndices |= IdxSigned;
4579
4580 if (SanOpts.has(SanitizerKind::ArrayBounds))
4581 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4582
4583 // Extend or truncate the index type to 32 or 64-bits.
4584 if (Promote && Idx->getType() != IntPtrTy)
4585 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4586
4587 return Idx;
4588 };
4589 IdxPre = nullptr;
4590
4591 // If the base is a vector type, then we are forming a vector element lvalue
4592 // with this subscript.
4593 if (E->getBase()->getType()->isSubscriptableVectorType() &&
4594 !isa<ExtVectorElementExpr>(E->getBase())) {
4595 // Emit the vector as an lvalue to get its address.
4596 LValue LHS = EmitLValue(E->getBase());
4597 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4598 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4599 return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
4600 LHS.getBaseInfo(), TBAAAccessInfo());
4601 }
4602
4603 // The HLSL runtime handles the subscript expression on global resource arrays.
4604 if (getLangOpts().HLSL && (E->getType()->isHLSLResourceRecord() ||
4605 E->getType()->isHLSLResourceRecordArray())) {
4606 std::optional<LValue> LV =
4607 CGM.getHLSLRuntime().emitResourceArraySubscriptExpr(E, *this);
4608 if (LV.has_value())
4609 return *LV;
4610 }
4611
4612 // All the other cases basically behave like simple offsetting.
4613
4614 // Handle the extvector case we ignored above.
4615 if (isa<ExtVectorElementExpr>(E->getBase())) {
4616 LValue LV = EmitLValue(E->getBase());
4617 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4618 Address Addr = EmitExtVectorElementLValue(LV);
4619
4620 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4621 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4622 SignedIndices, E->getExprLoc());
4623 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4624 CGM.getTBAAInfoForSubobject(LV, EltType));
4625 }
4626
4627 LValueBaseInfo EltBaseInfo;
4628 TBAAAccessInfo EltTBAAInfo;
4629 Address Addr = Address::invalid();
4630 if (const VariableArrayType *vla =
4631 getContext().getAsVariableArrayType(E->getType())) {
4632 // The base must be a pointer, which is not an aggregate. Emit
4633 // it. It needs to be emitted first in case it's what captures
4634 // the VLA bounds.
4635 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4636 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4637
4638 // The element count here is the total number of non-VLA elements.
4639 llvm::Value *numElements = getVLASize(vla).NumElts;
4640
4641 // Effectively, the multiply by the VLA size is part of the GEP.
4642 // GEP indexes are signed, and scaling an index isn't permitted to
4643 // signed-overflow, so we use the same semantics for our explicit
4644 // multiply. We suppress this if overflow is not undefined behavior.
4645 if (getLangOpts().PointerOverflowDefined) {
4646 Idx = Builder.CreateMul(Idx, numElements);
4647 } else {
4648 Idx = Builder.CreateNSWMul(Idx, numElements);
4649 }
4650
4651 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4652 !getLangOpts().PointerOverflowDefined,
4653 SignedIndices, E->getExprLoc());
4654
4655 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4656 // Indexing over an interface, as in "NSString *P; P[4];"
4657
4658 // Emit the base pointer.
4659 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4660 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4661
4662 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4663 llvm::Value *InterfaceSizeVal =
4664 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4665
4666 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4667
4668 // We don't necessarily build correct LLVM struct types for ObjC
4669 // interfaces, so we can't rely on GEP to do this scaling
4670 // correctly; we need to cast to i8*. FIXME: is this actually
4671 // true? A lot of other things in the fragile ABI would break...
4672 llvm::Type *OrigBaseElemTy = Addr.getElementType();
4673
4674 // Do the GEP.
4675 CharUnits EltAlign =
4676 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4677 llvm::Value *EltPtr =
4678 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4679 ScaledIdx, false, SignedIndices, E->getExprLoc());
4680 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4681 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4682 // If this is A[i] where A is an array, the frontend will have decayed the
4683 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4684 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4685 // "gep x, i" here. Emit one "gep A, 0, i".
4686 assert(Array->getType()->isArrayType() &&
4687 "Array to pointer decay must have array source type!");
4688 LValue ArrayLV;
4689 // For simple multidimensional array indexing, set the 'accessed' flag for
4690 // better bounds-checking of the base expression.
4691 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4692 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4693 else
4694 ArrayLV = EmitLValue(Array);
4695 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4696
4697 if (SanOpts.has(SanitizerKind::ArrayBounds))
4698 EmitCountedByBoundsChecking(Array, Idx, ArrayLV.getAddress(),
4699 E->getIdx()->getType(), Array->getType(),
4700 Accessed, /*FlexibleArray=*/true);
4701
4702 // Propagate the alignment from the array itself to the result.
4703 QualType arrayType = Array->getType();
4704 Addr = emitArraySubscriptGEP(
4705 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4706 E->getType(), !getLangOpts().PointerOverflowDefined, SignedIndices,
4707 E->getExprLoc(), &arrayType, E->getBase());
4708 EltBaseInfo = ArrayLV.getBaseInfo();
4709 if (!CGM.getCodeGenOpts().NewStructPathTBAA) {
4710 // Since CodeGenTBAA::getTypeInfoHelper only handles array types for
4711 // new struct path TBAA, we must use a plain access.
4712 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4713 } else if (ArrayLV.getTBAAInfo().isMayAlias()) {
4714 EltTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4715 } else if (ArrayLV.getTBAAInfo().isIncomplete()) {
4716 // The array element is complete, even if the array is not.
4717 EltTBAAInfo = CGM.getTBAAAccessInfo(E->getType());
4718 } else {
4719 // The TBAA access info from the array (base) lvalue is ordinary. We will
4720 // adapt it to create access info for the element.
4721 EltTBAAInfo = ArrayLV.getTBAAInfo();
4722
4723 // We retain the TBAA struct path (BaseType and Offset members) from the
4724 // array. In the TBAA representation, we map any array access to the
4725 // element at index 0, as the index is generally a runtime value. This
4726 // element has the same offset in the base type as the array itself.
4727 // If the array lvalue had no base type, there is no point trying to
4728 // generate one, since an array itself is not a valid base type.
4729
4730 // We also retain the access type from the base lvalue, but the access
4731 // size must be updated to the size of an individual element.
4732 EltTBAAInfo.Size =
4733 getContext().getTypeSizeInChars(E->getType()).getQuantity();
4734 }
4735 } else {
4736 // The base must be a pointer; emit it with an estimate of its alignment.
4737 Address BaseAddr =
4738 EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4739 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4740 QualType ptrType = E->getBase()->getType();
4741 Addr = emitArraySubscriptGEP(*this, BaseAddr, Idx, E->getType(),
4742 !getLangOpts().PointerOverflowDefined,
4743 SignedIndices, E->getExprLoc(), &ptrType,
4744 E->getBase());
4745
4746 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4747 StructFieldAccess Visitor;
4748 const Expr *Base = Visitor.Visit(E->getBase());
4749
4750 if (const auto *CE = dyn_cast_if_present<CastExpr>(Base);
4751 CE && CE->getCastKind() == CK_LValueToRValue)
4752 EmitCountedByBoundsChecking(CE->getSubExpr(), Idx, Address::invalid(),
4753 E->getIdx()->getType(), ptrType, Accessed,
4754 /*FlexibleArray=*/false);
4755 }
4756 }
4757
4758 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4759
4760 if (getLangOpts().ObjC &&
4761 getLangOpts().getGC() != LangOptions::NonGC) {
4762 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
4763 setObjCGCLValueClass(getContext(), E, LV);
4764 }
4765 return LV;
4766}
4767
4768 llvm::Value *CodeGenFunction::EmitMatrixIndexExpr(const Expr *E) {
4769 llvm::Value *Idx = EmitScalarExpr(E);
4770 if (Idx->getType() == IntPtrTy)
4771 return Idx;
4772 bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
4773 return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
4774}
4775
4776 LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
4777 assert(
4778 !E->isIncomplete() &&
4779 "incomplete matrix subscript expressions should be rejected during Sema");
4780 LValue Base = EmitLValue(E->getBase());
4781
4782 // Extend or truncate the index type to 32 or 64-bits if needed.
4783 llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
4784 llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());
4785
4786 llvm::Value *NumRows = Builder.getIntN(
4787 RowIdx->getType()->getScalarSizeInBits(),
4788 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4789 llvm::Value *FinalIdx =
4790 Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4791 return LValue::MakeMatrixElt(
4792 MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
4793 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4794}
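// Index math example (illustrative): matrix elements are linearized
// column-major, so for a matrix with 4 rows the element at row 2, column 1
// maps to the flat index 1 * 4 + 2 = 6 in the underlying vector.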
4795
4796 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
4797 LValueBaseInfo &BaseInfo,
4798 TBAAAccessInfo &TBAAInfo,
4799 QualType BaseTy, QualType ElTy,
4800 bool IsLowerBound) {
4801 LValue BaseLVal;
4802 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4803 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4804 if (BaseTy->isArrayType()) {
4805 Address Addr = BaseLVal.getAddress();
4806 BaseInfo = BaseLVal.getBaseInfo();
4807
4808 // If the array type was an incomplete type, we need to make sure
4809 // the decay ends up being the right type.
4810 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4811 Addr = Addr.withElementType(NewTy);
4812
4813 // Note that VLA pointers are always decayed, so we don't need to do
4814 // anything here.
4815 if (!BaseTy->isVariableArrayType()) {
4816 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4817 "Expected pointer to array");
4818 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4819 }
4820
4821 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4822 }
4823 LValueBaseInfo TypeBaseInfo;
4824 TBAAAccessInfo TypeTBAAInfo;
4825 CharUnits Align =
4826 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4827 BaseInfo.mergeForCast(TypeBaseInfo);
4828 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4829 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
4830 CGF.ConvertTypeForMem(ElTy), Align);
4831 }
4832 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4833}
4834
4835 LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
4836 bool IsLowerBound) {
4837
4838 assert(!E->isOpenACCArraySection() &&
4839 "OpenACC Array section codegen not implemented");
4840
4841 QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
4842 QualType ResultExprTy;
4843 if (auto *AT = getContext().getAsArrayType(BaseTy))
4844 ResultExprTy = AT->getElementType();
4845 else
4846 ResultExprTy = BaseTy->getPointeeType();
4847 llvm::Value *Idx = nullptr;
4848 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4849 // Requesting the lower bound or the upper bound, without a provided length
4850 // and without a ':' for the default length -> length = 1.
4851 // Idx = LowerBound ?: 0;
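// Hedged illustration (example sections, not from this file): requesting the
// lower bound of `a[2:5]` gives Idx = 2; for `a[2]` (no ':') the upper bound
// is also Idx = 2; a section with no explicit lower bound, e.g. the lower
// bound of `a[:5]`, gives Idx = 0.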
4852 if (auto *LowerBound = E->getLowerBound()) {
4853 Idx = Builder.CreateIntCast(
4854 EmitScalarExpr(LowerBound), IntPtrTy,
4855 LowerBound->getType()->hasSignedIntegerRepresentation());
4856 } else
4857 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4858 } else {
4859 // Try to emit the length or the lower bound as a constant. If this is
4860 // possible, 1 is subtracted from the constant length or lower bound.
4861 // Otherwise, emit the LLVM IR (LB + Len) - 1.
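// Worked example (hedged): for the upper bound of `a[1:4]` both values are
// constant, so Idx folds to 1 + 4 - 1 = 4; if either part is non-constant,
// the (LB + Len) - 1 computation is emitted as IR instead.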
4862 auto &C = CGM.getContext();
4863 auto *Length = E->getLength();
4864 llvm::APSInt ConstLength;
4865 if (Length) {
4866 // Idx = LowerBound + Length - 1;
4867 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4868 ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4869 Length = nullptr;
4870 }
4871 auto *LowerBound = E->getLowerBound();
4872 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4873 if (LowerBound) {
4874 if (std::optional<llvm::APSInt> LB =
4875 LowerBound->getIntegerConstantExpr(C)) {
4876 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4877 LowerBound = nullptr;
4878 }
4879 }
4880 if (!Length)
4881 --ConstLength;
4882 else if (!LowerBound)
4883 --ConstLowerBound;
4884
4885 if (Length || LowerBound) {
4886 auto *LowerBoundVal =
4887 LowerBound
4888 ? Builder.CreateIntCast(
4889 EmitScalarExpr(LowerBound), IntPtrTy,
4890 LowerBound->getType()->hasSignedIntegerRepresentation())
4891 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4892 auto *LengthVal =
4893 Length
4894 ? Builder.CreateIntCast(
4895 EmitScalarExpr(Length), IntPtrTy,
4896 Length->getType()->hasSignedIntegerRepresentation())
4897 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4898 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4899 /*HasNUW=*/false,
4900 !getLangOpts().PointerOverflowDefined);
4901 if (Length && LowerBound) {
4902 Idx = Builder.CreateSub(
4903 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4904 /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
4905 }
4906 } else
4907 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4908 } else {
4909 // Idx = ArraySize - 1;
4910 QualType ArrayTy = BaseTy->isPointerType()
4912 : BaseTy;
4913 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4914 Length = VAT->getSizeExpr();
4915 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4916 ConstLength = *L;
4917 Length = nullptr;
4918 }
4919 } else {
4920 auto *CAT = C.getAsConstantArrayType(ArrayTy);
4921 assert(CAT && "unexpected type for array initializer");
4922 ConstLength = CAT->getSize();
4923 }
4924 if (Length) {
4925 auto *LengthVal = Builder.CreateIntCast(
4926 EmitScalarExpr(Length), IntPtrTy,
4927 Length->getType()->hasSignedIntegerRepresentation());
4928 Idx = Builder.CreateSub(
4929 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4930 /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
4931 } else {
4932 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4933 --ConstLength;
4934 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4935 }
4936 }
4937 }
4938 assert(Idx);
4939
4940 Address EltPtr = Address::invalid();
4941 LValueBaseInfo BaseInfo;
4942 TBAAAccessInfo TBAAInfo;
4943 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4944 // The base must be a pointer, which is not an aggregate. Emit
4945 // it. It needs to be emitted first in case it's what captures
4946 // the VLA bounds.
4947 Address Base =
4948 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4949 BaseTy, VLA->getElementType(), IsLowerBound);
4950 // The element count here is the total number of non-VLA elements.
4951 llvm::Value *NumElements = getVLASize(VLA).NumElts;
4952
4953 // Effectively, the multiply by the VLA size is part of the GEP.
4954 // GEP indexes are signed, and scaling an index isn't permitted to
4955 // signed-overflow, so we use the same semantics for our explicit
4956 // multiply. We suppress this if overflow is not undefined behavior.
4957 if (getLangOpts().PointerOverflowDefined)
4958 Idx = Builder.CreateMul(Idx, NumElements);
4959 else
4960 Idx = Builder.CreateNSWMul(Idx, NumElements);
4961 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4962 !getLangOpts().PointerOverflowDefined,
4963 /*signedIndices=*/false, E->getExprLoc());
4964 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4965 // If this is A[i] where A is an array, the frontend will have decayed the
4966 // base to be an ArrayToPointerDecay implicit cast. While correct, it is
4967 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4968 // "gep x, i" here. Emit one "gep A, 0, i".
4969 assert(Array->getType()->isArrayType() &&
4970 "Array to pointer decay must have array source type!");
4971 LValue ArrayLV;
4972 // For simple multidimensional array indexing, set the 'accessed' flag for
4973 // better bounds-checking of the base expression.
4974 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4975 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4976 else
4977 ArrayLV = EmitLValue(Array);
4978
4979 // Propagate the alignment from the array itself to the result.
4980 EltPtr = emitArraySubscriptGEP(
4981 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4982 ResultExprTy, !getLangOpts().PointerOverflowDefined,
4983 /*signedIndices=*/false, E->getExprLoc());
4984 BaseInfo = ArrayLV.getBaseInfo();
4985 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4986 } else {
4987 Address Base =
4988 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
4989 ResultExprTy, IsLowerBound);
4990 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4991 !getLangOpts().PointerOverflowDefined,
4992 /*signedIndices=*/false, E->getExprLoc());
4993 }
4994
4995 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4996}
4997
5000 // Emit the base vector as an l-value.
5001 LValue Base;
5002
5003 // ExtVectorElementExpr's base can either be a vector or a pointer to a vector.
5004 if (E->isArrow()) {
5005 // If it is a pointer to a vector, emit the address and form an lvalue with
5006 // it.
5007 LValueBaseInfo BaseInfo;
5008 TBAAAccessInfo TBAAInfo;
5009 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
5010 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
5011 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
5012 Base.getQuals().removeObjCGCAttr();
5013 } else if (E->getBase()->isGLValue()) {
5014 // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
5015 // emit the base as an lvalue.
5016 assert(E->getBase()->getType()->isVectorType());
5017 Base = EmitLValue(E->getBase());
5018 } else {
5019 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
5020 assert(E->getBase()->getType()->isVectorType() &&
5021 "Result must be a vector");
5022 llvm::Value *Vec = EmitScalarExpr(E->getBase());
5023
5024 // Store the vector to memory (because LValue wants an address).
5025 Address VecMem = CreateMemTemp(E->getBase()->getType());
5026 // We need to zero-extend an HLSL boolean vector to store it back to memory.
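// Hedged sketch (assuming the usual 32-bit HLSL bool memory layout): an HLSL
// `bool2` is <2 x i1> as an SSA value but is stored in memory as <2 x i32>,
// so the value is zero-extended before the store.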
5027 QualType Ty = E->getBase()->getType();
5028 llvm::Type *LTy = convertTypeForLoadStore(Ty, Vec->getType());
5029 if (LTy->getScalarSizeInBits() > Vec->getType()->getScalarSizeInBits())
5030 Vec = Builder.CreateZExt(Vec, LTy);
5031 Builder.CreateStore(Vec, VecMem);
5033 }
5034
5035 QualType type =
5036 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
5037
5038 // Encode the element access list into a vector of unsigned indices.
5040 E->getEncodedElementAccess(Indices);
5041
5042 if (Base.isSimple()) {
5043 llvm::Constant *CV =
5044 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
5045 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
5046 Base.getBaseInfo(), TBAAAccessInfo());
5047 }
5048 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
5049
5050 llvm::Constant *BaseElts = Base.getExtVectorElts();
5052
5053 for (unsigned Index : Indices)
5054 CElts.push_back(BaseElts->getAggregateElement(Index));
5055 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
5056 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
5057 Base.getBaseInfo(), TBAAAccessInfo());
5058}
5059
5061 const Expr *UnderlyingBaseExpr = E->IgnoreParens();
5062 while (auto *BaseMemberExpr = dyn_cast<MemberExpr>(UnderlyingBaseExpr))
5063 UnderlyingBaseExpr = BaseMemberExpr->getBase()->IgnoreParens();
5064 return getContext().isSentinelNullExpr(UnderlyingBaseExpr);
5065}
5066
5068 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
5070 return EmitDeclRefLValue(DRE);
5071 }
5072
5073 Expr *BaseExpr = E->getBase();
5074 // Check whether the underlying base pointer is a constant null.
5075 // If so, we do not set the inbounds flag on the GEP, to avoid breaking some
5076 // old-style offsetof idioms.
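// A hedged sketch of the kind of idiom being protected (macro name is
// illustrative):
//   #define my_offsetof(T, f) ((size_t)&((T *)0)->f)
// With a constant-null base, an inbounds GEP would yield a poison value, so a
// plain (non-inbounds) GEP is emitted instead.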
5077 bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
5079 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
5080 LValue BaseLV;
5081 if (E->isArrow()) {
5082 LValueBaseInfo BaseInfo;
5083 TBAAAccessInfo TBAAInfo;
5084 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
5085 QualType PtrTy = BaseExpr->getType()->getPointeeType();
5086 SanitizerSet SkippedChecks;
5087 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
5088 if (IsBaseCXXThis)
5089 SkippedChecks.set(SanitizerKind::Alignment, true);
5090 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
5091 SkippedChecks.set(SanitizerKind::Null, true);
5093 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
5094 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
5095 } else
5096 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
5097
5098 NamedDecl *ND = E->getMemberDecl();
5099 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
5100 LValue LV = EmitLValueForField(BaseLV, Field, IsInBounds);
5102 if (getLangOpts().OpenMP) {
5103 // If the member was explicitly marked as nontemporal, mark it as
5104 // nontemporal. If the base lvalue is marked as nontemporal, mark access
5105 // to children as nontemporal too.
5106 if ((IsWrappedCXXThis(BaseExpr) &&
5107 CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
5108 BaseLV.isNontemporal())
5109 LV.setNontemporal(/*Value=*/true);
5110 }
5111 return LV;
5112 }
5113
5114 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
5115 return EmitFunctionDeclLValue(*this, E, FD);
5116
5117 llvm_unreachable("Unhandled member declaration!");
5118}
5119
5120/// Given that we are currently emitting a lambda, emit an l-value for
5121/// one of its members.
5122///
5124 llvm::Value *ThisValue) {
5125 bool HasExplicitObjectParameter = false;
5126 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
5127 if (MD) {
5128 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
5129 assert(MD->getParent()->isLambda());
5130 assert(MD->getParent() == Field->getParent());
5131 }
5132 LValue LambdaLV;
5133 if (HasExplicitObjectParameter) {
5134 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
5135 auto It = LocalDeclMap.find(D);
5136 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
5137 Address AddrOfExplicitObject = It->getSecond();
5138 if (D->getType()->isReferenceType())
5139 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
5141 else
5142 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
5144
5145 // Make sure we have an lvalue to the lambda itself and not a derived class.
5146 auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
5147 auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
5148 if (ThisTy != LambdaTy) {
5149 const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
5151 LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
5152 BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
5154 LambdaLV = MakeAddrLValue(Base, T);
5155 }
5156 } else {
5157 CanQualType LambdaTagType =
5158 getContext().getCanonicalTagType(Field->getParent());
5159 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
5160 }
5161 return EmitLValueForField(LambdaLV, Field);
5162}
5163
5165 return EmitLValueForLambdaField(Field, CXXABIThisValue);
5166}
5167
5168/// Get the field index in the debug info. The debug info structure/union
5169/// will ignore the unnamed bitfields.
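/// For illustration (hypothetical record): in
///   struct S { int a; int : 0; int b; };
/// the unnamed zero-width bit-field is skipped, so 'b' gets debug-info field
/// index 1 even though its AST field index is 2.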
5171 unsigned FieldIndex) {
5172 unsigned I = 0, Skipped = 0;
5173
5174 for (auto *F : Rec->getDefinition()->fields()) {
5175 if (I == FieldIndex)
5176 break;
5177 if (F->isUnnamedBitField())
5178 Skipped++;
5179 I++;
5180 }
5181
5182 return FieldIndex - Skipped;
5183}
5184
5185/// Get the address of a zero-sized field within a record. The resulting
5186/// address doesn't necessarily have the right type.
5188 const FieldDecl *Field,
5189 bool IsInBounds) {
5191 CGF.getContext().getFieldOffset(Field));
5192 if (Offset.isZero())
5193 return Base;
5194 Base = Base.withElementType(CGF.Int8Ty);
5195 if (!IsInBounds)
5196 return CGF.Builder.CreateConstByteGEP(Base, Offset);
5197 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
5198}
5199
5200/// Drill down to the storage of a field without walking into
5201/// reference types.
5202///
5203/// The resulting address doesn't necessarily have the right type.
5205 const FieldDecl *field, bool IsInBounds) {
5206 if (isEmptyFieldForLayout(CGF.getContext(), field))
5207 return emitAddrOfZeroSizeField(CGF, base, field, IsInBounds);
5208
5209 const RecordDecl *rec = field->getParent();
5210
5211 unsigned idx =
5212 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5213
5214 if (!IsInBounds)
5215 return CGF.Builder.CreateConstGEP2_32(base, 0, idx, field->getName());
5216
5217 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
5218}
5219
5221 Address addr, const FieldDecl *field) {
5222 const RecordDecl *rec = field->getParent();
5223 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
5224 base.getType(), rec->getLocation());
5225
5226 unsigned idx =
5227 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5228
5230 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
5231}
5232
5233static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
5234 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
5235 if (!RD)
5236 return false;
5237
5238 if (RD->isDynamicClass())
5239 return true;
5240
5241 for (const auto &Base : RD->bases())
5242 if (hasAnyVptr(Base.getType(), Context))
5243 return true;
5244
5245 for (const FieldDecl *Field : RD->fields())
5246 if (hasAnyVptr(Field->getType(), Context))
5247 return true;
5248
5249 return false;
5250}
5251
5253 bool IsInBounds) {
5254 LValueBaseInfo BaseInfo = base.getBaseInfo();
5255
5256 if (field->isBitField()) {
5257 const CGRecordLayout &RL =
5258 CGM.getTypes().getCGRecordLayout(field->getParent());
5259 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
5260 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
5261 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
5262 Info.VolatileStorageSize != 0 &&
5263 field->getType()
5266 Address Addr = base.getAddress();
5267 unsigned Idx = RL.getLLVMFieldNo(field);
5268 const RecordDecl *rec = field->getParent();
5271 if (!UseVolatile) {
5272 if (!IsInPreservedAIRegion &&
5273 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5274 if (Idx != 0) {
5275 // For structs, we GEP to the field that the record layout suggests.
5276 if (!IsInBounds)
5277 Addr = Builder.CreateConstGEP2_32(Addr, 0, Idx, field->getName());
5278 else
5279 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
5280 }
5281 } else {
5282 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
5283 getContext().getCanonicalTagType(rec), rec->getLocation());
5284 Addr = Builder.CreatePreserveStructAccessIndex(
5285 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
5286 DbgInfo);
5287 }
5288 }
5289 const unsigned SS =
5290 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
5291 // Get the access type.
5292 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
5293 Addr = Addr.withElementType(FieldIntTy);
5294 if (UseVolatile) {
5295 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
5296 if (VolatileOffset)
5297 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
5298 }
5299
5300 QualType fieldType =
5301 field->getType().withCVRQualifiers(base.getVRQualifiers());
5302 // TODO: Support TBAA for bit fields.
5303 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
5304 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
5305 TBAAAccessInfo());
5306 }
5307
5308 // Fields of may-alias structures are may-alias themselves.
5309 // FIXME: this should get propagated down through anonymous structs
5310 // and unions.
5311 QualType FieldType = field->getType();
5312 const RecordDecl *rec = field->getParent();
5313 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
5314 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
5315 TBAAAccessInfo FieldTBAAInfo;
5316 if (base.getTBAAInfo().isMayAlias() ||
5317 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
5318 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5319 } else if (rec->isUnion()) {
5320 // TODO: Support TBAA for unions.
5321 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5322 } else {
5323 // If no base type has been assigned for the base access, then try to generate
5324 // one for this base lvalue.
5325 FieldTBAAInfo = base.getTBAAInfo();
5326 if (!FieldTBAAInfo.BaseType) {
5327 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
5328 assert(!FieldTBAAInfo.Offset &&
5329 "Nonzero offset for an access with no base type!");
5330 }
5331
5332 // Adjust offset to be relative to the base type.
5333 const ASTRecordLayout &Layout =
5335 unsigned CharWidth = getContext().getCharWidth();
5336 if (FieldTBAAInfo.BaseType)
5337 FieldTBAAInfo.Offset +=
5338 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
5339
5340 // Update the final access type and size.
5341 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
5342 FieldTBAAInfo.Size =
5344 }
5345
5346 Address addr = base.getAddress();
5348 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
5349 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
5350 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5351 ClassDef->isDynamicClass()) {
5352 // Getting to any field of a dynamic object requires stripping the dynamic
5353 // information provided by invariant.group. This is because accessing
5354 // fields may leak the real address of the dynamic object, which could
5355 // result in miscompilation when the leaked pointer is compared.
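// Roughly, the emitted IR looks like (illustrative only):
//   %stripped = call ptr @llvm.strip.invariant.group.p0(ptr %obj)
// and the field address is then computed from %stripped rather than %obj.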
5356 auto *stripped =
5357 Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
5358 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
5359 }
5360 }
5361
5362 unsigned RecordCVR = base.getVRQualifiers();
5363 if (rec->isUnion()) {
5364 // For unions, there is no pointer adjustment.
5365 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5366 hasAnyVptr(FieldType, getContext()))
5367 // Because unions can easily skip invariant.group barriers, we need to add
5368 // a barrier every time a CXXRecord field with a vptr is referenced.
5369 addr = Builder.CreateLaunderInvariantGroup(addr);
5370
5372 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5373 // Remember the original union field index
5374 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
5375 rec->getLocation());
5376 addr =
5377 Address(Builder.CreatePreserveUnionAccessIndex(
5378 addr.emitRawPointer(*this),
5379 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
5380 addr.getElementType(), addr.getAlignment());
5381 }
5382
5383 if (FieldType->isReferenceType())
5384 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5385 } else {
5386 if (!IsInPreservedAIRegion &&
5387 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
5388 // For structs, we GEP to the field that the record layout suggests.
5389 addr = emitAddrOfFieldStorage(*this, addr, field, IsInBounds);
5390 else
5391 // Remember the original struct field index
5392 addr = emitPreserveStructAccess(*this, base, addr, field);
5393 }
5394
5395 // If this is a reference field, load the reference right now.
5396 if (FieldType->isReferenceType()) {
5397 LValue RefLVal =
5398 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5399 if (RecordCVR & Qualifiers::Volatile)
5400 RefLVal.getQuals().addVolatile();
5401 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
5402
5403 // Qualifiers on the struct don't apply to the referencee.
5404 RecordCVR = 0;
5405 FieldType = FieldType->getPointeeType();
5406 }
5407
5408 // Make sure that the address is pointing to the right type. This is critical
5409 // for both unions and structs.
5410 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5411
5412 if (field->hasAttr<AnnotateAttr>())
5413 addr = EmitFieldAnnotations(field, addr);
5414
5415 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5416 LV.getQuals().addCVRQualifiers(RecordCVR);
5417
5418 // The __weak attribute on a field is ignored.
5421
5422 return LV;
5423}
5424
5425LValue
5427 const FieldDecl *Field) {
5428 QualType FieldType = Field->getType();
5429
5430 if (!FieldType->isReferenceType())
5431 return EmitLValueForField(Base, Field);
5432
5434 *this, Base.getAddress(), Field,
5435 /*IsInBounds=*/!getLangOpts().PointerOverflowDefined);
5436
5437 // Make sure that the address is pointing to the right type.
5438 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
5439 V = V.withElementType(llvmType);
5440
5441 // TODO: Generate TBAA information that describes this access as a structure
5442 // member access and not just an access to an object of the field's type. This
5443 // should be similar to what we do in EmitLValueForField().
5444 LValueBaseInfo BaseInfo = Base.getBaseInfo();
5445 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
5446 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
5447 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
5448 CGM.getTBAAInfoForSubobject(Base, FieldType));
5449}
5450
5452 if (E->isFileScope()) {
5453 ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
5454 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
5455 }
5456 if (E->getType()->isVariablyModifiedType())
5457 // make sure to emit the VLA size.
5459
5460 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
5461 const Expr *InitExpr = E->getInitializer();
5463
5464 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
5465 /*Init*/ true);
5466
5467 // Block-scope compound literals are destroyed at the end of the enclosing
5468 // scope in C.
5469 if (!getLangOpts().CPlusPlus)
5472 E->getType(), getDestroyer(DtorKind),
5473 DtorKind & EHCleanup);
5474
5475 return Result;
5476}
5477
5479 if (!E->isGLValue())
5480 // Initializing an aggregate temporary in C++11: T{...}.
5481 return EmitAggExprToLValue(E);
5482
5483 // An lvalue initializer list must be initializing a reference.
5484 assert(E->isTransparent() && "non-transparent glvalue init list");
5485 return EmitLValue(E->getInit(0));
5486}
5487
5488/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5489/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5490/// LValue is returned and the current block has been terminated.
5491static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5492 const Expr *Operand) {
5493 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5494 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5495 return std::nullopt;
5496 }
5497
5498 return CGF.EmitLValue(Operand);
5499}
5500
5501namespace {
5502 // Handle the case where the condition folds to a simple integer constant,
5503 // which means we don't have to separately handle the true/false blocks.
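// For example (hedged): given `(true ? x : y) = 0;` the condition folds to a
// constant, so only the lvalue for `x` is emitted and no branches or extra
// basic blocks are created.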
5504std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5505 CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
5506 const Expr *condExpr = E->getCond();
5507 bool CondExprBool;
5508 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5509 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5510 if (!CondExprBool)
5511 std::swap(Live, Dead);
5512
5513 if (!CGF.ContainsLabel(Dead)) {
5514 // If the true case is live, we need to track its region.
5515 if (CondExprBool)
5517 CGF.markStmtMaybeUsed(Dead);
5518 // If it is a throw expression, we emit it and return an undefined lvalue
5519 // because it can't be used.
5520 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
5521 CGF.EmitCXXThrowExpr(ThrowExpr);
5522 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
5523 llvm::Type *Ty = CGF.UnqualPtrTy;
5524 return CGF.MakeAddrLValue(
5525 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
5526 Dead->getType());
5527 }
5528 return CGF.EmitLValue(Live);
5529 }
5530 }
5531 return std::nullopt;
5532}
5533struct ConditionalInfo {
5534 llvm::BasicBlock *lhsBlock, *rhsBlock;
5535 std::optional<LValue> LHS, RHS;
5536};
5537
5538// Create and generate the 3 blocks for a conditional operator.
5539// Leaves the 'current block' in the continuation basic block.
5540template<typename FuncTy>
5541ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
5542 const AbstractConditionalOperator *E,
5543 const FuncTy &BranchGenFunc) {
5544 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
5545 CGF.createBasicBlock("cond.false"), std::nullopt,
5546 std::nullopt};
5547 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
5548
5550 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
5551 CGF.getProfileCount(E));
5552
5553 // Any temporaries created here are conditional.
5554 CGF.EmitBlock(Info.lhsBlock);
5556 eval.begin(CGF);
5557 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
5558 eval.end(CGF);
5559 Info.lhsBlock = CGF.Builder.GetInsertBlock();
5560
5561 if (Info.LHS)
5562 CGF.Builder.CreateBr(endBlock);
5563
5564 // Any temporaries created here are conditional.
5565 CGF.EmitBlock(Info.rhsBlock);
5566 eval.begin(CGF);
5567 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
5568 eval.end(CGF);
5569 Info.rhsBlock = CGF.Builder.GetInsertBlock();
5570 CGF.EmitBlock(endBlock);
5571
5572 return Info;
5573}
5574} // namespace
5575
5577 const AbstractConditionalOperator *E) {
5578 if (!E->isGLValue()) {
5579 // ?: here should be an aggregate.
5580 assert(hasAggregateEvaluationKind(E->getType()) &&
5581 "Unexpected conditional operator!");
5582 return (void)EmitAggExprToLValue(E);
5583 }
5584
5585 OpaqueValueMapping binding(*this, E);
5586 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
5587 return;
5588
5589 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
5590 CGF.EmitIgnoredExpr(E);
5591 return LValue{};
5592 });
5593}
5596 if (!expr->isGLValue()) {
5597 // ?: here should be an aggregate.
5598 assert(hasAggregateEvaluationKind(expr->getType()) &&
5599 "Unexpected conditional operator!");
5600 return EmitAggExprToLValue(expr);
5601 }
5602
5603 OpaqueValueMapping binding(*this, expr);
5604 if (std::optional<LValue> Res =
5605 HandleConditionalOperatorLValueSimpleCase(*this, expr))
5606 return *Res;
5607
5608 ConditionalInfo Info = EmitConditionalBlocks(
5609 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
5610 return EmitLValueOrThrowExpression(CGF, E);
5611 });
5612
5613 if ((Info.LHS && !Info.LHS->isSimple()) ||
5614 (Info.RHS && !Info.RHS->isSimple()))
5615 return EmitUnsupportedLValue(expr, "conditional operator");
5616
5617 if (Info.LHS && Info.RHS) {
5618 Address lhsAddr = Info.LHS->getAddress();
5619 Address rhsAddr = Info.RHS->getAddress();
5621 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
5622 Builder.GetInsertBlock(), expr->getType());
5623 AlignmentSource alignSource =
5624 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
5625 Info.RHS->getBaseInfo().getAlignmentSource());
5626 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
5627 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
5628 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
5629 TBAAInfo);
5630 } else {
5631 assert((Info.LHS || Info.RHS) &&
5632 "both operands of glvalue conditional are throw-expressions?");
5633 return Info.LHS ? *Info.LHS : *Info.RHS;
5634 }
5635}
5636
5637/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
5638/// type. If the cast is to a reference, we can have the usual lvalue result,
5639/// otherwise if a cast is needed by the code generator in an lvalue context,
5640/// then it must mean that we need the address of an aggregate in order to
5641/// access one of its members. This can happen for all the reasons that casts
5642/// are permitted with aggregate result, including noop aggregate casts, and
5643/// cast from scalar to union.
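/// For example (hedged sketch): `(Base &)derived` keeps the usual lvalue
/// result, while a GNU cast-to-union such as `(union U)i` needs the address
/// of the aggregate so one of its members can be accessed.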
5645 switch (E->getCastKind()) {
5646 case CK_ToVoid:
5647 case CK_BitCast:
5648 case CK_LValueToRValueBitCast:
5649 case CK_ArrayToPointerDecay:
5650 case CK_FunctionToPointerDecay:
5651 case CK_NullToMemberPointer:
5652 case CK_NullToPointer:
5653 case CK_IntegralToPointer:
5654 case CK_PointerToIntegral:
5655 case CK_PointerToBoolean:
5656 case CK_IntegralCast:
5657 case CK_BooleanToSignedIntegral:
5658 case CK_IntegralToBoolean:
5659 case CK_IntegralToFloating:
5660 case CK_FloatingToIntegral:
5661 case CK_FloatingToBoolean:
5662 case CK_FloatingCast:
5663 case CK_FloatingRealToComplex:
5664 case CK_FloatingComplexToReal:
5665 case CK_FloatingComplexToBoolean:
5666 case CK_FloatingComplexCast:
5667 case CK_FloatingComplexToIntegralComplex:
5668 case CK_IntegralRealToComplex:
5669 case CK_IntegralComplexToReal:
5670 case CK_IntegralComplexToBoolean:
5671 case CK_IntegralComplexCast:
5672 case CK_IntegralComplexToFloatingComplex:
5673 case CK_DerivedToBaseMemberPointer:
5674 case CK_BaseToDerivedMemberPointer:
5675 case CK_MemberPointerToBoolean:
5676 case CK_ReinterpretMemberPointer:
5677 case CK_AnyPointerToBlockPointerCast:
5678 case CK_ARCProduceObject:
5679 case CK_ARCConsumeObject:
5680 case CK_ARCReclaimReturnedObject:
5681 case CK_ARCExtendBlockObject:
5682 case CK_CopyAndAutoreleaseBlockObject:
5683 case CK_IntToOCLSampler:
5684 case CK_FloatingToFixedPoint:
5685 case CK_FixedPointToFloating:
5686 case CK_FixedPointCast:
5687 case CK_FixedPointToBoolean:
5688 case CK_FixedPointToIntegral:
5689 case CK_IntegralToFixedPoint:
5690 case CK_MatrixCast:
5691 case CK_HLSLVectorTruncation:
5692 case CK_HLSLArrayRValue:
5693 case CK_HLSLElementwiseCast:
5694 case CK_HLSLAggregateSplatCast:
5695 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5696
5697 case CK_Dependent:
5698 llvm_unreachable("dependent cast kind in IR gen!");
5699
5700 case CK_BuiltinFnToFnPtr:
5701 llvm_unreachable("builtin functions are handled elsewhere");
5702
5703 // These are never l-values; just use the aggregate emission code.
5704 case CK_NonAtomicToAtomic:
5705 case CK_AtomicToNonAtomic:
5706 return EmitAggExprToLValue(E);
5707
5708 case CK_Dynamic: {
5709 LValue LV = EmitLValue(E->getSubExpr());
5710 Address V = LV.getAddress();
5711 const auto *DCE = cast<CXXDynamicCastExpr>(E);
5713 }
5714
5715 case CK_ConstructorConversion:
5716 case CK_UserDefinedConversion:
5717 case CK_CPointerToObjCPointerCast:
5718 case CK_BlockPointerToObjCPointerCast:
5719 case CK_LValueToRValue:
5720 return EmitLValue(E->getSubExpr());
5721
5722 case CK_NoOp: {
5723 // CK_NoOp can model a qualification conversion, which can remove an array
5724 // bound and change the IR type.
5725 // FIXME: Once pointee types are removed from IR, remove this.
5726 LValue LV = EmitLValue(E->getSubExpr());
5728 // Propagate the volatile qualifier to the LValue, if it exists in E.
5729 LV.getQuals() = E->getType().getQualifiers();
5730 if (LV.isSimple()) {
5731 Address V = LV.getAddress();
5732 if (V.isValid()) {
5733 llvm::Type *T = ConvertTypeForMem(E->getType());
5734 if (V.getElementType() != T)
5735 LV.setAddress(V.withElementType(T));
5736 }
5737 }
5738 return LV;
5739 }
5740
5741 case CK_UncheckedDerivedToBase:
5742 case CK_DerivedToBase: {
5743 auto *DerivedClassDecl = E->getSubExpr()->getType()->castAsCXXRecordDecl();
5744 LValue LV = EmitLValue(E->getSubExpr());
5745 Address This = LV.getAddress();
5746
5747 // Perform the derived-to-base conversion
5749 This, DerivedClassDecl, E->path_begin(), E->path_end(),
5750 /*NullCheckValue=*/false, E->getExprLoc());
5751
5752 // TODO: Support accesses to members of base classes in TBAA. For now, we
5753 // conservatively pretend that the complete object is of the base class
5754 // type.
5755 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
5756 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5757 }
5758 case CK_ToUnion:
5759 return EmitAggExprToLValue(E);
5760 case CK_BaseToDerived: {
5761 auto *DerivedClassDecl = E->getType()->castAsCXXRecordDecl();
5762 LValue LV = EmitLValue(E->getSubExpr());
5763
5764 // Perform the base-to-derived conversion
5766 LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
5767 /*NullCheckValue=*/false);
5768
5769 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
5770 // performed and the object is not of the derived type.
5773 E->getType());
5774
5775 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
5776 EmitVTablePtrCheckForCast(E->getType(), Derived,
5777 /*MayBeNull=*/false, CFITCK_DerivedCast,
5778 E->getBeginLoc());
5779
5780 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
5781 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5782 }
5783 case CK_LValueBitCast: {
5784 // This must be a reinterpret_cast (or C-style equivalent).
5785 const auto *CE = cast<ExplicitCastExpr>(E);
5786
5787 CGM.EmitExplicitCastExprType(CE, this);
5788 LValue LV = EmitLValue(E->getSubExpr());
5790 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
5791
5792 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
5794 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
5795 E->getBeginLoc());
5796
5797 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5798 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5799 }
5800 case CK_AddressSpaceConversion: {
5801 LValue LV = EmitLValue(E->getSubExpr());
5802 QualType DestTy = getContext().getPointerType(E->getType());
5803 llvm::Value *V = getTargetHooks().performAddrSpaceCast(
5804 *this, LV.getPointer(*this),
5805 E->getSubExpr()->getType().getAddressSpace(), ConvertType(DestTy));
5807 LV.getAddress().getAlignment()),
5808 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
5809 }
5810 case CK_ObjCObjectLValueCast: {
5811 LValue LV = EmitLValue(E->getSubExpr());
5813 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5814 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5815 }
5816 case CK_ZeroToOCLOpaqueType:
5817 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
5818
5819 case CK_VectorSplat: {
5820 // LValue results of vector splats are only supported in HLSL.
5821 if (!getLangOpts().HLSL)
5822 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5823 return EmitLValue(E->getSubExpr());
5824 }
5825 }
5826
5827 llvm_unreachable("Unhandled lvalue cast kind?");
5828}
5829
5834
5835std::pair<LValue, LValue>
5837 // Emitting the casted temporary through an opaque value.
5838 LValue BaseLV = EmitLValue(E->getArgLValue());
5840
5841 QualType ExprTy = E->getType();
5842 Address OutTemp = CreateIRTemp(ExprTy);
5843 LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);
5844
5845 if (E->isInOut())
5847 TempLV);
5848
5850 return std::make_pair(BaseLV, TempLV);
5851}
5852
5854 CallArgList &Args, QualType Ty) {
5855
5856 auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
5857
5858 llvm::Value *Addr = TempLV.getAddress().getBasePointer();
5859 llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
5860
5862
5863 Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
5864 Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast());
5865 Args.add(RValue::get(TmpAddr, *this), Ty);
5866 return TempLV;
5867}
5868
5869LValue
5872
5873 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
5874 it = OpaqueLValues.find(e);
5875
5876 if (it != OpaqueLValues.end())
5877 return it->second;
5878
5879 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
5880 return EmitLValue(e->getSourceExpr());
5881}
5882
5883RValue
5886
5887 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
5888 it = OpaqueRValues.find(e);
5889
5890 if (it != OpaqueRValues.end())
5891 return it->second;
5892
5893 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
5894 return EmitAnyExpr(e->getSourceExpr());
5895}
5896
5899 return OpaqueLValues.contains(E);
5900 return OpaqueRValues.contains(E);
5901}
5902
5904 const FieldDecl *FD,
5905 SourceLocation Loc) {
5906 QualType FT = FD->getType();
5907 LValue FieldLV = EmitLValueForField(LV, FD);
5908 switch (getEvaluationKind(FT)) {
5909 case TEK_Complex:
5910 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
5911 case TEK_Aggregate:
5912 return FieldLV.asAggregateRValue();
5913 case TEK_Scalar:
5914 // This routine is used to load fields one-by-one to perform a copy, so
5915 // don't load reference fields.
5916 if (FD->getType()->isReferenceType())
5917 return RValue::get(FieldLV.getPointer(*this));
5918 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
5919 // primitive load.
5920 if (FieldLV.isBitField())
5921 return EmitLoadOfLValue(FieldLV, Loc);
5922 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
5923 }
5924 llvm_unreachable("bad evaluation kind");
5925}
5926
5927//===--------------------------------------------------------------------===//
5928// Expression Emission
5929//===--------------------------------------------------------------------===//
5930
5933 llvm::CallBase **CallOrInvoke) {
5934 llvm::CallBase *CallOrInvokeStorage;
5935 if (!CallOrInvoke) {
5936 CallOrInvoke = &CallOrInvokeStorage;
5937 }
5938
5939 auto AddCoroElideSafeOnExit = llvm::make_scope_exit([&] {
5940 if (E->isCoroElideSafe()) {
5941 auto *I = *CallOrInvoke;
5942 if (I)
5943 I->addFnAttr(llvm::Attribute::CoroElideSafe);
5944 }
5945 });
5946
5947 // Builtins never have block type.
5948 if (E->getCallee()->getType()->isBlockPointerType())
5949 return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
5950
5951 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
5952 return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);
5953
5954 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
5955 return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);
5956
5957 // A CXXOperatorCallExpr is created even for explicit object methods, but
5958 // these should be treated like static function calls.
5959 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
5960 if (const auto *MD =
5961 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
5962 MD && MD->isImplicitObjectMemberFunction())
5963 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);
5964
5965 CGCallee callee = EmitCallee(E->getCallee());
5966
5967 if (callee.isBuiltin()) {
5968 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
5969 E, ReturnValue);
5970 }
5971
5972 if (callee.isPseudoDestructor()) {
5974 }
5975
5976 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
5977 /*Chain=*/nullptr, CallOrInvoke);
5978}
5979
5980/// Emit a CallExpr without considering whether it might be a subclass.
5983 llvm::CallBase **CallOrInvoke) {
5984 CGCallee Callee = EmitCallee(E->getCallee());
5985 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
5986 /*Chain=*/nullptr, CallOrInvoke);
5987}
5988
5989// Detect the unusual situation where an inline version is shadowed by a
5990// non-inline version. In that case we should pick the external one
5991// everywhere. That's GCC behavior too.
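// Hedged illustration (GNU C, declarations are made up):
//   extern inline __attribute__((gnu_inline, always_inline))
//   void *memcpy(void *d, const void *s, size_t n) { /* inline version */ }
//   void *memcpy(void *d, const void *s, size_t n);  // non-inline shadow
// Once the non-inline declaration appears, the external memcpy is used
// everywhere.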
5993 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
5994 if (!PD->isInlineBuiltinDeclaration())
5995 return false;
5996 return true;
5997}
5998
6000 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
6001
6002 if (auto builtinID = FD->getBuiltinID()) {
6003 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
6004 std::string NoBuiltins = "no-builtins";
6005
6006 StringRef Ident = CGF.CGM.getMangledName(GD);
6007 std::string FDInlineName = (Ident + ".inline").str();
6008
6009 bool IsPredefinedLibFunction =
6011 bool HasAttributeNoBuiltin =
6012 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
6013 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
6014
6015 // When directly calling an inline builtin, call it through its mangled
6016 // name to make it clear it's not the actual builtin.
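      // Sketch of the effect (illustrative names): a call to such an inline
      // builtin `memcpy` from outside its own body is routed to an internal,
      // always_inline clone named "memcpy.inline" rather than to the builtin.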
6017 if (CGF.CurFn->getName() != FDInlineName &&
6019 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
6020 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
6021 llvm::Module *M = Fn->getParent();
6022 llvm::Function *Clone = M->getFunction(FDInlineName);
6023 if (!Clone) {
6024 Clone = llvm::Function::Create(Fn->getFunctionType(),
6025 llvm::GlobalValue::InternalLinkage,
6026 Fn->getAddressSpace(), FDInlineName, M);
6027 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
6028 }
6029 return CGCallee::forDirect(Clone, GD);
6030 }
6031
6032 // Replaceable builtins provide their own implementation of a builtin. If we
6033 // are in an inline builtin implementation, avoid trivial infinite
6034 // recursion. Honor __attribute__((no_builtin("foo"))) or
6035 // __attribute__((no_builtin)) on the current function, unless foo is
6036 // not a predefined library function, in which case we must generate the
6037 // builtin no matter what.
6038 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
6039 return CGCallee::forBuiltin(builtinID, FD);
6040 }
6041
6042 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
6043 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
6044 FD->hasAttr<CUDAGlobalAttr>())
6045 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
6046 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
6047
6048 return CGCallee::forDirect(CalleePtr, GD);
6049}
6050
6052 if (DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
6054 return GlobalDecl(FD);
6055}
6056
6058 E = E->IgnoreParens();
6059
6060 // Look through function-to-pointer decay.
6061 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
6062 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
6063 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
6064 return EmitCallee(ICE->getSubExpr());
6065 }
6066
6067 // Try to remember the original __ptrauth qualifier for loads of
6068 // function pointers.
6069 if (ICE->getCastKind() == CK_LValueToRValue) {
6070 const Expr *SubExpr = ICE->getSubExpr();
6071 if (const auto *PtrType = SubExpr->getType()->getAs<PointerType>()) {
6072 std::pair<llvm::Value *, CGPointerAuthInfo> Result =
6074
6076 assert(FunctionType->isFunctionType());
6077
6078 GlobalDecl GD;
6079 if (const auto *VD =
6080 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee())) {
6081 GD = GlobalDecl(VD);
6082 }
6084 CGCallee Callee(CalleeInfo, Result.first, Result.second);
6085 return Callee;
6086 }
6087 }
6088
6089 // Resolve direct calls.
6090 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
6091 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
6093 }
6094 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
6095 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
6096 EmitIgnoredExpr(ME->getBase());
6097 return EmitDirectCallee(*this, FD);
6098 }
6099
6100 // Look through template substitutions.
6101 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
6102 return EmitCallee(NTTP->getReplacement());
6103
6104 // Treat pseudo-destructor calls differently.
6105 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
6107 }
6108
6109 // Otherwise, we have an indirect reference.
6110 llvm::Value *calleePtr;
6112 if (auto ptrType = E->getType()->getAs<PointerType>()) {
6113 calleePtr = EmitScalarExpr(E);
6114 functionType = ptrType->getPointeeType();
6115 } else {
6116 functionType = E->getType();
6117 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
6118 }
6119 assert(functionType->isFunctionType());
6120
6121 GlobalDecl GD;
6122 if (const auto *VD =
6123 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
6124 GD = GlobalDecl(VD);
6125
6126 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
6127 CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
6128 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
6129 return callee;
6130}
6131
6133 // Comma expressions just emit their LHS then their RHS as an l-value.
6134 if (E->getOpcode() == BO_Comma) {
6135 EmitIgnoredExpr(E->getLHS());
6137 return EmitLValue(E->getRHS());
6138 }
6139
6140 if (E->getOpcode() == BO_PtrMemD ||
6141 E->getOpcode() == BO_PtrMemI)
6143
6144 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
6145
6146 // Create a Key Instructions source location atom group that covers both
6147 // LHS and RHS expressions. Nested RHS expressions may get subsequently
6148 // separately grouped (1 below):
6149 //
6150 // 1. `a = b = c` -> Two atoms.
6151 // 2. `x = new(1)` -> One atom (for both addr store and value store).
6152 // 3. Complex and agg assignment -> One atom.
6154
6155 // Note that in all of these cases, __block variables need the RHS
6156 // evaluated first just in case the variable gets moved by the RHS.
6157
6158 switch (getEvaluationKind(E->getType())) {
6159 case TEK_Scalar: {
6160 if (PointerAuthQualifier PtrAuth =
6161 E->getLHS()->getType().getPointerAuth()) {
6163 LValue CopiedLV = LV;
6164 CopiedLV.getQuals().removePointerAuth();
6165 llvm::Value *RV =
6166 EmitPointerAuthQualify(PtrAuth, E->getRHS(), CopiedLV.getAddress());
6167 EmitNullabilityCheck(CopiedLV, RV, E->getExprLoc());
6168 EmitStoreThroughLValue(RValue::get(RV), CopiedLV);
6169 return LV;
6170 }
6171
6172 switch (E->getLHS()->getType().getObjCLifetime()) {
6174 return EmitARCStoreStrong(E, /*ignored*/ false).first;
6175
6177 return EmitARCStoreAutoreleasing(E).first;
6178
6179 // No reason to do any of these differently.
6183 break;
6184 }
6185
6186 // TODO: Can we de-duplicate this code with the corresponding code in
6187 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
6188 RValue RV;
6189 llvm::Value *Previous = nullptr;
6190 QualType SrcType = E->getRHS()->getType();
6191 // Check if the LHS is a bitfield; if the RHS contains an implicit cast
6192 // expression, we want to extract that value and potentially (if the bitfield
6193 // sanitizer is enabled) use it to check for an implicit conversion.
6194 if (E->getLHS()->refersToBitField()) {
6195 llvm::Value *RHS =
6197 RV = RValue::get(RHS);
6198 } else
6199 RV = EmitAnyExpr(E->getRHS());
6200
6202
6203 if (RV.isScalar())
6205
6206 if (LV.isBitField()) {
6207 llvm::Value *Result = nullptr;
6208 // If bitfield sanitizers are enabled we want to use the result
6209 // to check whether a truncation or sign change has occurred.
6210 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
6212 else
6214
6215 // If the expression contained an implicit conversion, make sure
6216 // to use the value before the scalar conversion.
6217 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
6218 QualType DstType = E->getLHS()->getType();
6219 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
6220 LV.getBitFieldInfo(), E->getExprLoc());
6221 } else
6222 EmitStoreThroughLValue(RV, LV);
6223
6224 if (getLangOpts().OpenMP)
6225 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
6226 E->getLHS());
6227 return LV;
6228 }
6229
6230 case TEK_Complex:
6232
6233 case TEK_Aggregate:
6234 // If the lang opt is HLSL and the LHS is a constant array,
6235 // then we are performing a copy assignment and call a special
6236 // function, because EmitAggExprToLValue emits to a temporary LValue.
6238 return EmitHLSLArrayAssignLValue(E);
6239
6240 return EmitAggExprToLValue(E);
6241 }
6242 llvm_unreachable("bad evaluation kind");
6243}
6244
6245// This function implements trivial copy assignment for HLSL's
6246// assignable constant arrays.
6248 // Don't emit an LValue for the RHS because it might not be an LValue
6249 LValue LHS = EmitLValue(E->getLHS());
6250 // In C the RHS of an assignment operator is an RValue.
6251 // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
6252 // EmitInitializationToLValue to emit an RValue into an LValue.
6254 return LHS;
6255}
6256
6258 llvm::CallBase **CallOrInvoke) {
6259 RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);
6260
6261 if (!RV.isScalar())
6262 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6264
6265 assert(E->getCallReturnType(getContext())->isReferenceType() &&
6266 "Can't have a scalar return unless the return type is a "
6267 "reference type!");
6268
6270}
6271
6273 // FIXME: This shouldn't require another copy.
6274 return EmitAggExprToLValue(E);
6275}
6276
6279 && "binding l-value to type which needs a temporary");
6280 AggValueSlot Slot = CreateAggTemp(E->getType());
6281 EmitCXXConstructExpr(E, Slot);
6283}
6284
6285LValue
6289
6291 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
6292 .withElementType(ConvertType(E->getType()));
6293}
6294
6299
6300LValue
6308
6311
6312 if (!RV.isScalar())
6313 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6315
6316 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
6317 "Can't have a scalar return unless the return type is a "
6318 "reference type!");
6319
6321}
6322
6324 Address V =
6325 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
6327}
6328
6330 const ObjCIvarDecl *Ivar) {
6331 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
6332}
6333
6334llvm::Value *
6336 const ObjCIvarDecl *Ivar) {
6337 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
6338 QualType PointerDiffType = getContext().getPointerDiffType();
6339 return Builder.CreateZExtOrTrunc(OffsetValue,
6340 getTypes().ConvertType(PointerDiffType));
6341}
6342
6344 llvm::Value *BaseValue,
6345 const ObjCIvarDecl *Ivar,
6346 unsigned CVRQualifiers) {
6347 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
6348 Ivar, CVRQualifiers);
6349}
6350
6352 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
6353 llvm::Value *BaseValue = nullptr;
6354 const Expr *BaseExpr = E->getBase();
6355 Qualifiers BaseQuals;
6356 QualType ObjectTy;
6357 if (E->isArrow()) {
6358 BaseValue = EmitScalarExpr(BaseExpr);
6359 ObjectTy = BaseExpr->getType()->getPointeeType();
6360 BaseQuals = ObjectTy.getQualifiers();
6361 } else {
6362 LValue BaseLV = EmitLValue(BaseExpr);
6363 BaseValue = BaseLV.getPointer(*this);
6364 ObjectTy = BaseExpr->getType();
6365 BaseQuals = ObjectTy.getQualifiers();
6366 }
6367
6368 LValue LV =
6369 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
6370 BaseQuals.getCVRQualifiers());
6372 return LV;
6373}
6374
6376 // We can only get an l-value for a message expression returning an aggregate type.
6377 RValue RV = EmitAnyExprToTemp(E);
6378 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6380}
6381
6383 const CGCallee &OrigCallee, const CallExpr *E,
6385 llvm::Value *Chain,
6386 llvm::CallBase **CallOrInvoke,
6387 CGFunctionInfo const **ResolvedFnInfo) {
6388 // Get the actual function type. The callee type will always be a pointer to
6389 // function type or a block pointer type.
6390 assert(CalleeType->isFunctionPointerType() &&
6391 "Call must have function pointer type!");
6392
6393 const Decl *TargetDecl =
6394 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
6395
6396 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
6397 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
6398 "trying to emit a call to an immediate function");
6399
6400 CalleeType = getContext().getCanonicalType(CalleeType);
6401
6402 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
6403
6404 CGCallee Callee = OrigCallee;
6405
6406 if (SanOpts.has(SanitizerKind::Function) &&
6407 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
6408 !isa<FunctionNoProtoType>(PointeeType)) {
6409 if (llvm::Constant *PrefixSig =
6410 CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
6411 auto CheckOrdinal = SanitizerKind::SO_Function;
6412 auto CheckHandler = SanitizerHandler::FunctionTypeMismatch;
6413 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6414 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
6415
6416 llvm::Type *PrefixSigType = PrefixSig->getType();
6417 llvm::StructType *PrefixStructTy = llvm::StructType::get(
6418 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
6419
6420 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6421 if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
6422 // Use raw pointer since we are using the callee pointer as data here.
6423 Address Addr =
6424 Address(CalleePtr, CalleePtr->getType(),
6426 CalleePtr->getPointerAlignment(CGM.getDataLayout())),
6427 Callee.getPointerAuthInfo(), nullptr);
6428 CalleePtr = Addr.emitRawPointer(*this);
6429 }
6430
6431 // On 32-bit Arm, the low bit of a function pointer indicates whether
6432 // it's using the Arm or Thumb instruction set. The actual first
6433 // instruction lives at the same address either way, so we must clear
6434 // that low bit before using the function address to find the prefix
6435 // structure.
6436 //
6437 // This applies to both Arm and Thumb target triples, because
6438 // either one could be used in an interworking context where it
6439 // might be passed function pointers of both types.
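      // Roughly (illustrative IR only):
      //   %addr    = ptrtoint ptr %callee to i32
      //   %aligned = and i32 %addr, -2        ; clear the Arm/Thumb bit
      //   %fnptr   = inttoptr i32 %aligned to ptr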
6440 llvm::Value *AlignedCalleePtr;
6441 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
6442 llvm::Value *CalleeAddress =
6443 Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
6444 llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
6445 llvm::Value *AlignedCalleeAddress =
6446 Builder.CreateAnd(CalleeAddress, Mask);
6447 AlignedCalleePtr =
6448 Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
6449 } else {
6450 AlignedCalleePtr = CalleePtr;
6451 }
6452
6453 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
6454 llvm::Value *CalleeSigPtr =
6455 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
6456 llvm::Value *CalleeSig =
6457 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
6458 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
6459
6460 llvm::BasicBlock *Cont = createBasicBlock("cont");
6461 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
6462 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
6463
6464 EmitBlock(TypeCheck);
6465 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
6466 Int32Ty,
6467 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
6468 getPointerAlign());
6469 llvm::Value *CalleeTypeHashMatch =
6470 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
6471 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
6472 EmitCheckTypeDescriptor(CalleeType)};
6473 EmitCheck(std::make_pair(CalleeTypeHashMatch, CheckOrdinal), CheckHandler,
6474 StaticData, {CalleePtr});
6475
6476 Builder.CreateBr(Cont);
6477 EmitBlock(Cont);
6478 }
6479 }
6480
6481 const auto *FnType = cast<FunctionType>(PointeeType);
6482
6483 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
6484 FD && DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
6485 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FnType);
6486
6487 bool CFIUnchecked =
6489
6490 // If we are checking indirect calls and this call is indirect, check that the
6491 // function pointer is a member of the bit set for the function type.
6492 if (SanOpts.has(SanitizerKind::CFIICall) &&
6493 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) && !CFIUnchecked) {
6494 auto CheckOrdinal = SanitizerKind::SO_CFIICall;
6495 auto CheckHandler = SanitizerHandler::CFICheckFail;
6496 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6497 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
6498
6499 llvm::Metadata *MD =
6500 CGM.CreateMetadataIdentifierForFnType(QualType(FnType, 0));
6501
6502 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
6503
6504 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6505 llvm::Value *TypeTest = Builder.CreateCall(
6506 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
6507
6508 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
6509 llvm::Constant *StaticData[] = {
6510 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
6511 EmitCheckSourceLocation(E->getBeginLoc()),
6512 EmitCheckTypeDescriptor(QualType(FnType, 0)),
6513 };
6514 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
6515 EmitCfiSlowPathCheck(CheckOrdinal, TypeTest, CrossDsoTypeId, CalleePtr,
6516 StaticData);
6517 } else {
6518 EmitCheck(std::make_pair(TypeTest, CheckOrdinal), CheckHandler,
6519 StaticData, {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
6520 }
6521 }
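// Usage illustration (not from this file): the kind of indirect call that the
// llvm.type.test check above guards. Building with -fsanitize=cfi-icall (which
// also requires -flto and -fvisibility=hidden) diagnoses the mismatched call.
#include <cstdio>
static void takesInt(int X) { std::printf("%d\n", X); }
static void takesDouble(double) {}
int main() {
  void (*FP)(int) = takesInt;
  FP(1);                                             // OK: dynamic type matches
  FP = reinterpret_cast<void (*)(int)>(&takesDouble);
  FP(2);  // cfi-icall failure: callee's type metadata does not match void(int)
}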
6522
6523 CallArgList Args;
6524 if (Chain)
6525 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
6526
6527 // C++17 requires that we evaluate arguments to a call using assignment syntax
6528 // right-to-left, and that we evaluate arguments to certain other operators
6529 // left-to-right. Note that we allow this to override the order dictated by
6530 // the calling convention on the MS ABI, which means that parameter
6531 // destruction order is not necessarily reverse construction order.
6532 // FIXME: Revisit this based on C++ committee response to unimplementability.
6533 EvaluationOrder Order = EvaluationOrder::Default;
6534 bool StaticOperator = false;
6535 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
6536 if (OCE->isAssignmentOp())
6537 Order = EvaluationOrder::ForceRightToLeft;
6538 else {
6539 switch (OCE->getOperator()) {
6540 case OO_LessLess:
6541 case OO_GreaterGreater:
6542 case OO_AmpAmp:
6543 case OO_PipePipe:
6544 case OO_Comma:
6545 case OO_ArrowStar:
6546 Order = EvaluationOrder::ForceLeftToRight;
6547 break;
6548 default:
6549 break;
6550 }
6551 }
6552
6553 if (const auto *MD =
6554 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
6555 MD && MD->isStatic())
6556 StaticOperator = true;
6557 }
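// Illustration (sketch, not from this file) of the C++17 ordering implemented
// above: with assignment syntax the right operand is evaluated before the
// left, even when the assignment is a call to an overloaded operator=.
#include <iostream>
struct Tracer {
  Tracer &operator=(int) { std::cout << "assign\n"; return *this; }
};
static int rhs() { std::cout << "rhs\n"; return 0; }
static Tracer &lhs(Tracer &T) { std::cout << "lhs\n"; return T; }
int main() {
  Tracer T;
  lhs(T) = rhs();   // C++17: prints "rhs" before "lhs", then "assign"
}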
6558
6559 auto Arguments = E->arguments();
6560 if (StaticOperator) {
6561 // If we're calling a static operator, we need to emit the object argument
6562 // and ignore it.
6563 EmitIgnoredExpr(E->getArg(0));
6564 Arguments = drop_begin(Arguments, 1);
6565 }
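// Illustration (sketch, not from this file): a call through a C++23 static
// operator() still has an object argument expression; as described above it is
// emitted for its side effects and then dropped from the argument list.
struct Adder {
  static int operator()(int X) { return X + 1; }   // C++23 static call operator
};
inline int useAdder() {
  Adder A;
  return A(41);   // the object expression 'A' is evaluated, then ignored
}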
6566 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
6567 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
6568
6569 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
6570 Args, FnType, /*ChainCall=*/Chain);
6571
6572 if (ResolvedFnInfo)
6573 *ResolvedFnInfo = &FnInfo;
6574
6575 // A HIP function pointer contains the kernel handle when it is used in a
6576 // triple-chevron launch; the kernel stub needs to be loaded from the kernel
6577 // handle and used as the callee.
6578 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
6579 isa<CUDAKernelCallExpr>(E) &&
6580 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6581 llvm::Value *Handle = Callee.getFunctionPointer();
6582 auto *Stub = Builder.CreateLoad(
6583 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
6584 Callee.setFunctionPointer(Stub);
6585 }
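// Illustrative HIP source pattern handled above (sketch, not from this file):
// on the host, taking the address of a __global__ function yields the kernel
// handle, so a triple-chevron launch through that pointer must first load the
// kernel stub from the handle.
//
//   __global__ void k(int *p);
//   void launch(int *p) {
//     auto *fp = k;          // fp holds the kernel handle on the host side
//     fp<<<1, 64>>>(p);      // launch through a function pointer
//   }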
6586 llvm::CallBase *LocalCallOrInvoke = nullptr;
6587 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
6588 E == MustTailCall, E->getExprLoc());
6589
6590 // Generate a function declaration DISubprogram so that it can be used
6591 // in debug info about call sites.
6592 if (CGDebugInfo *DI = getDebugInfo()) {
6593 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
6594 FunctionArgList Args;
6595 QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
6596 DI->EmitFuncDeclForCallSite(LocalCallOrInvoke,
6597 DI->getFunctionType(CalleeDecl, ResTy, Args),
6598 CalleeDecl);
6599 }
6600 }
6601 if (CallOrInvoke)
6602 *CallOrInvoke = LocalCallOrInvoke;
6603
6604 return Call;
6605}
6606
6607 LValue
6608 CodeGenFunction::EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
6609 Address BaseAddr = Address::invalid();
6610 if (E->getOpcode() == BO_PtrMemI) {
6611 BaseAddr = EmitPointerWithAlignment(E->getLHS());
6612 } else {
6613 BaseAddr = EmitLValue(E->getLHS()).getAddress();
6614 }
6615
6616 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
6617 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
6618
6619 LValueBaseInfo BaseInfo;
6620 TBAAAccessInfo TBAAInfo;
6621 bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
6624 E, BaseAddr, OffsetV, MPT, IsInBounds, &BaseInfo, &TBAAInfo);
6625
6626 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6627}
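// The two operator forms lowered above, at the source level (illustration, not
// from this file): ".*" (BO_PtrMemD) binds a pointer-to-data-member to an
// lvalue object, "->*" (BO_PtrMemI) binds it through an object pointer.
struct S { int Field; };
inline int readThroughMemberPointer(S &Obj, S *Ptr) {
  int S::*PM = &S::Field;
  return Obj.*PM + Ptr->*PM;
}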
6628
6629/// Given the address of a temporary variable, produce an r-value of
6630/// its type.
6631 RValue CodeGenFunction::convertTempToRValue(Address addr,
6632 QualType type,
6633 SourceLocation loc) {
6634 LValue lvalue = MakeAddrLValue(addr, type);
6635 switch (getEvaluationKind(type)) {
6636 case TEK_Complex:
6637 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
6638 case TEK_Aggregate:
6639 return lvalue.asAggregateRValue();
6640 case TEK_Scalar:
6641 return RValue::get(EmitLoadOfScalar(lvalue, loc));
6642 }
6643 llvm_unreachable("bad evaluation kind");
6644}
6645
6646void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6647 assert(Val->getType()->isFPOrFPVectorTy());
6648 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6649 return;
6650
6651 llvm::MDBuilder MDHelper(getLLVMContext());
6652 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
6653
6654 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6655}
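// Effect of SetFPAccuracy, illustrated (sketch, not emitted verbatim): the
// instruction is tagged with !fpmath metadata giving the maximum acceptable
// relative error in ULPs, which later passes and the backend may exploit.
//
//   %q = fdiv float %x, %y, !fpmath !0
//   !0 = !{float 2.500000e+00}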
6656
6657 void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6658 llvm::Type *EltTy = Val->getType()->getScalarType();
6659 if (!EltTy->isFloatTy())
6660 return;
6661
6662 if ((getLangOpts().OpenCL &&
6663 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6664 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6665 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6666 // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp
6667 //
6668 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6669 // build option allows an application to specify that single precision
6670 // floating-point divide (x/y and 1/x) and sqrt used in the program
6671 // source are correctly rounded.
6672 //
6673 // TODO: CUDA has a prec-sqrt flag
6674 SetFPAccuracy(Val, 3.0f);
6675 }
6676}
6677
6678 void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6679 llvm::Type *EltTy = Val->getType()->getScalarType();
6680 if (!EltTy->isFloatTy())
6681 return;
6682
6683 if ((getLangOpts().OpenCL &&
6684 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6685 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6686 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6687 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6688 //
6689 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6690 // build option allows an application to specify that single precision
6691 // floating-point divide (x/y and 1/x) and sqrt used in the program
6692 // source are correctly rounded.
6693 //
6694 // TODO: CUDA has a prec-div flag
6695 SetFPAccuracy(Val, 2.5f);
6696 }
6697}
6698
6699namespace {
6700 struct LValueOrRValue {
6701 LValue LV;
6702 RValue RV;
6703 };
6704}
6705
6706static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
6707 const PseudoObjectExpr *E,
6708 bool forLValue,
6709 AggValueSlot slot) {
6710 SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
6711
6712 // Find the result expression, if any.
6713 const Expr *resultExpr = E->getResultExpr();
6714 LValueOrRValue result;
6715
6716 for (PseudoObjectExpr::const_semantics_iterator
6717 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
6718 const Expr *semantic = *i;
6719
6720 // If this semantic expression is an opaque value, bind it
6721 // to the result of its source expression.
6722 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
6723 // Skip unique OVEs.
6724 if (ov->isUnique()) {
6725 assert(ov != resultExpr &&
6726 "A unique OVE cannot be used as the result expression");
6727 continue;
6728 }
6729
6730 // If this is the result expression, we may need to evaluate
6731 // directly into the slot.
6732 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
6733 OVMA opaqueData;
6734 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
6735 CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
6736 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
6737 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
6738 AlignmentSource::Decl);
6739 opaqueData = OVMA::bind(CGF, ov, LV);
6740 result.RV = slot.asRValue();
6741
6742 // Otherwise, emit as normal.
6743 } else {
6744 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
6745
6746 // If this is the result, also evaluate the result now.
6747 if (ov == resultExpr) {
6748 if (forLValue)
6749 result.LV = CGF.EmitLValue(ov);
6750 else
6751 result.RV = CGF.EmitAnyExpr(ov, slot);
6752 }
6753 }
6754
6755 opaques.push_back(opaqueData);
6756
6757 // Otherwise, if the expression is the result, evaluate it
6758 // and remember the result.
6759 } else if (semantic == resultExpr) {
6760 if (forLValue)
6761 result.LV = CGF.EmitLValue(semantic);
6762 else
6763 result.RV = CGF.EmitAnyExpr(semantic, slot);
6764
6765 // Otherwise, evaluate the expression in an ignored context.
6766 } else {
6767 CGF.EmitIgnoredExpr(semantic);
6768 }
6769 }
6770
6771 // Unbind all the opaques now.
6772 for (CodeGenFunction::OpaqueValueMappingData &opaque : opaques)
6773 opaque.unbind(CGF);
6774
6775 return result;
6776}
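// Illustration (sketch, not from this file) of one source of PseudoObjectExprs
// walked above: with -fms-extensions, a __declspec(property) access is split
// into semantic getter/setter calls joined by opaque values.
struct Widget {
  int getSize() const { return Size; }
  void setSize(int V) { Size = V; }
  __declspec(property(get = getSize, put = setSize)) int size;
  int Size = 0;
};
inline int touch(Widget &W) {
  W.size = 3;          // PseudoObjectExpr: rewritten to W.setSize(3)
  return W.size + 1;   // PseudoObjectExpr: rewritten to W.getSize() + 1
}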
6777
6778 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
6779 AggValueSlot slot) {
6780 return emitPseudoObjectExpr(*this, E, false, slot).RV;
6781}
6782
6783 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
6784 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
6785 }
6786
6787 void CodeGenFunction::FlattenAccessAndType(
6788 Address Addr, QualType AddrType,
6789 SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList,
6790 SmallVectorImpl<QualType> &FlatTypes) {
6791 // WorkList is the list of types we are processing + the index list needed to
6792 // access the field of that type in Addr for use in a GEP
6793 llvm::SmallVector<std::pair<QualType, llvm::SmallVector<llvm::Value *, 4>>,
6794 16>
6795 WorkList;
6796 llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32);
6797 // Addr should be a pointer so we need to 'dereference' it
6798 WorkList.push_back({AddrType, {llvm::ConstantInt::get(IdxTy, 0)}});
6799
6800 while (!WorkList.empty()) {
6801 auto [T, IdxList] = WorkList.pop_back_val();
6802 T = T.getCanonicalType().getUnqualifiedType();
6803 assert(!isa<MatrixType>(T) && "Matrix types not yet supported in HLSL");
6804 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) {
6805 uint64_t Size = CAT->getZExtSize();
6806 for (int64_t I = Size - 1; I > -1; I--) {
6807 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
6808 IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
6809 WorkList.emplace_back(CAT->getElementType(), IdxListCopy);
6810 }
6811 } else if (const auto *RT = dyn_cast<RecordType>(T)) {
6812 const RecordDecl *Record = RT->getOriginalDecl()->getDefinitionOrSelf();
6813 assert(!Record->isUnion() && "Union types not supported in flat cast.");
6814
6815 const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);
6816
6818 if (CXXD && CXXD->isStandardLayout())
6819 Record = CXXD->getStandardLayoutBaseWithFields();
6820
6821 // deal with potential base classes
6822 if (CXXD && !CXXD->isStandardLayout()) {
6823 for (auto &Base : CXXD->bases())
6824 FieldTypes.push_back(Base.getType());
6825 }
6826
6827 for (auto *FD : Record->fields())
6828 FieldTypes.push_back(FD->getType());
6829
6830 for (int64_t I = FieldTypes.size() - 1; I > -1; I--) {
6831 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
6832 IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
6833 WorkList.insert(WorkList.end(), {FieldTypes[I], IdxListCopy});
6834 }
6835 } else if (const auto *VT = dyn_cast<VectorType>(T)) {
6836 llvm::Type *LLVMT = ConvertTypeForMem(T);
6837 CharUnits Align = getContext().getTypeAlignInChars(T);
6838 Address GEP =
6839 Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "vector.gep");
6840 for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) {
6841 llvm::Value *Idx = llvm::ConstantInt::get(IdxTy, I);
6842 // gep on vector fields is not recommended so combine gep with
6843 // extract/insert
6844 AccessList.emplace_back(GEP, Idx);
6845 FlatTypes.push_back(VT->getElementType());
6846 }
6847 } else {
6848 // a scalar/builtin type
6849 llvm::Type *LLVMT = ConvertTypeForMem(T);
6850 CharUnits Align = getContext().getTypeAlignInChars(T);
6851 Address GEP =
6852 Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "gep");
6853 AccessList.emplace_back(GEP, nullptr);
6854 FlatTypes.push_back(T);
6855 }
6856 }
6857}
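// Illustration (sketch, not from this file) of the flattening above: the
// worklist expands an HLSL aggregate depth-first in declaration order, so for
//   struct S { float2 V; int A[2]; };
// the resulting AccessList/FlatTypes pairs correspond to the leaf accesses
//   s.V[0]:float, s.V[1]:float, s.A[0]:int, s.A[1]:int
// with vector elements addressed by a GEP to the vector plus an element index.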
Defines the clang::ASTContext interface.
#define V(N, I)
This file provides some common utility functions for processing Lambda related AST Constructs.
Defines enum values for all the target-independent builtin functions.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV, bool IsMemberAccess=false)
Definition CGExpr.cpp:2864
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM)
Named Registers are named metadata pointing to the register name which will be read from/written to a...
Definition CGExpr.cpp:3133
static llvm::Value * emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc, llvm::Value *Ptr)
Definition CGExpr.cpp:712
static const Expr * isSimpleArrayDecayOperand(const Expr *E)
isSimpleArrayDecayOperand - If the specified expr is a simple decay from an array to pointer,...
Definition CGExpr.cpp:4265
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, int64_t &Offset)
The offset of a field from the beginning of the record.
Definition CGExpr.cpp:4454
static bool hasBPFPreserveStaticOffset(const RecordDecl *D)
Definition CGExpr.cpp:4320
ConstantEmissionKind
Can we constant-emit a load of a reference to a variable of the given type?
Definition CGExpr.cpp:1839
@ CEK_AsReferenceOnly
Definition CGExpr.cpp:1841
@ CEK_AsValueOnly
Definition CGExpr.cpp:1843
@ CEK_None
Definition CGExpr.cpp:1840
@ CEK_AsValueOrReference
Definition CGExpr.cpp:1842
static bool isConstantEmittableObjectType(QualType type)
Given an object of the given canonical type, can we safely copy a value out of it based on its initia...
Definition CGExpr.cpp:1812
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue)
Definition CGExpr.cpp:3121
static std::optional< LValue > EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand)
Emit the operand of a glvalue conditional operator.
Definition CGExpr.cpp:5491
static CheckRecoverableKind getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal)
Definition CGExpr.cpp:3713
static llvm::Value * emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Type *elemType, llvm::Value *ptr, ArrayRef< llvm::Value * > indices, bool inbounds, bool signedIndices, SourceLocation loc, const llvm::Twine &name="arrayidx")
Definition CGExpr.cpp:4279
SmallVector< llvm::Value *, 8 > RecIndicesTy
Definition CGExpr.cpp:1152
static GlobalDecl getGlobalDeclForDirectCall(const FunctionDecl *FD)
Definition CGExpr.cpp:6051
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, GlobalDecl GD)
Definition CGExpr.cpp:3108
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, CodeGenFunction &CGF, bool IsVector=true)
Definition CGExpr.cpp:2210
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, const PseudoObjectExpr *E, bool forLValue, AggValueSlot slot)
Definition CGExpr.cpp:6706
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, Address &Addr)
Definition CGExpr.cpp:4336
static llvm::StringRef GetUBSanTrapForHandler(SanitizerHandler ID)
Definition CGExpr.cpp:89
static llvm::Value * getArrayIndexingBound(CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel)
If Base is known to point to the start of an array, return the length of that array.
Definition CGExpr.cpp:1004
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, CodeGenFunction &CGF)
Definition CGExpr.cpp:2310
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type)
Definition CGExpr.cpp:1845
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, const FieldDecl *Field, bool IsInBounds)
Get the address of a zero-sized field within a record.
Definition CGExpr.cpp:5187
static QualType getConstantExprReferredType(const FullExpr *E, const ASTContext &Ctx)
Definition CGExpr.cpp:1644
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field, bool IsInBounds)
Drill down to the storage of a field without walking into reference types.
Definition CGExpr.cpp:5204
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool IsBool)
Definition CGExpr.cpp:1984
static std::optional< int64_t > getOffsetDifferenceInBits(CodeGenFunction &CGF, const FieldDecl *FD1, const FieldDecl *FD2)
Returns the relative offset difference between FD1 and FD2.
Definition CGExpr.cpp:4485
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD)
Definition CGExpr.cpp:5999
static LValue EmitThreadPrivateVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, llvm::Type *RealVarTy, SourceLocation Loc)
Definition CGExpr.cpp:2961
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, RecIndicesTy &Indices)
Definition CGExpr.cpp:1154
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD)
Definition CGExpr.cpp:5992
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, const Expr *E, const VarDecl *VD)
Definition CGExpr.cpp:3057
static bool hasAnyVptr(const QualType Type, const ASTContext &Context)
Definition CGExpr.cpp:5233
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase)
Given an array base, check whether its member access belongs to a record with preserve_access_index a...
Definition CGExpr.cpp:4349
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T)
Definition CGExpr.cpp:2975
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, const DeclRefExpr *E, const VarDecl *VD)
Determine whether we can emit a reference to VD from the current context, despite not necessarily hav...
Definition CGExpr.cpp:3158
VariableTypeDescriptorKind
Definition CGExpr.cpp:74
@ TK_Float
A floating-point type.
Definition CGExpr.cpp:78
@ TK_Unknown
Any other type. The value representation is unspecified.
Definition CGExpr.cpp:82
@ TK_Integer
An integer type.
Definition CGExpr.cpp:76
@ TK_BitInt
An _BitInt(N) type.
Definition CGExpr.cpp:80
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, bool isInit, CodeGenFunction &CGF)
Definition CGExpr.cpp:2235
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition CGExpr.cpp:1376
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, Address addr, const FieldDecl *field)
Definition CGExpr.cpp:5220
const SanitizerHandlerInfo SanitizerHandlers[]
Definition CGExpr.cpp:3730
static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef< llvm::Value * > FnArgs, SanitizerHandler CheckHandler, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB, bool NoMerge)
Definition CGExpr.cpp:3736
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, LValueBaseInfo &BaseInfo, TBAAAccessInfo &TBAAInfo, QualType BaseTy, QualType ElTy, bool IsLowerBound)
Definition CGExpr.cpp:4796
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
FormatToken * Previous
The previous token in the unwrapped line.
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
Defines the clang::Module class, which describes a module in the source code.
static const SanitizerMask AlwaysRecoverable
static const SanitizerMask Unrecoverable
#define LIST_SANITIZER_CHECKS
SanitizerHandler
Defines the SourceManager interface.
static QualType getPointeeType(const MemRegion *R)
a trap message and trap category.
const LValueBase getLValueBase() const
Definition APValue.cpp:983
bool isLValue() const
Definition APValue.h:472
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:188
SourceManager & getSourceManager()
Definition ASTContext.h:798
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
Builtin::Context & BuiltinInfo
Definition ASTContext.h:739
const LangOptions & getLangOpts() const
Definition ASTContext.h:891
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType BoolTy
llvm::DenseMap< const CXXMethodDecl *, CXXCastPath > LambdaCastPaths
For capturing lambdas with an explicit object parameter whose type is derived from the lambda type,...
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
unsigned getTargetAddressSpace(LangAS AS) const
bool isSentinelNullExpr(const Expr *E)
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4287
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4465
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4471
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4477
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition Expr.h:7090
Expr * getBase()
Get base of the array section.
Definition Expr.h:7156
Expr * getLength()
Get length of array section.
Definition Expr.h:7166
static QualType getBaseOriginalType(const Expr *Base)
Return original type of the base expression for array section.
Definition Expr.cpp:5224
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:7195
Expr * getLowerBound()
Get lower bound of array section.
Definition Expr.h:7160
bool isOpenACCArraySection() const
Definition Expr.h:7153
SourceLocation getColonLocFirst() const
Definition Expr.h:7187
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2721
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2776
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2750
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3720
QualType getElementType() const
Definition TypeBase.h:3732
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:3972
Expr * getLHS() const
Definition Expr.h:4022
SourceLocation getExprLoc() const
Definition Expr.h:4013
Expr * getRHS() const
Definition Expr.h:4024
static bool isAdditiveOp(Opcode Opc)
Definition Expr.h:4058
Opcode getOpcode() const
Definition Expr.h:4017
A fixed int type of a specified bitwidth.
Definition TypeBase.h:8137
unsigned getNumBits() const
Definition TypeBase.h:8149
bool isPredefinedLibFunction(unsigned ID) const
Determines whether this builtin is a predefined libc/libm function, such as "malloc",...
Definition Builtins.h:313
Represents binding an expression to a temporary.
Definition ExprCXX.h:1494
CXXTemporary * getTemporary()
Definition ExprCXX.h:1512
const Expr * getSubExpr() const
Definition ExprCXX.h:1516
Represents a call to a C++ constructor.
Definition ExprCXX.h:1549
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1366
bool isStandardLayout() const
Determine whether this class is standard-layout per C++ [class]p7.
Definition DeclCXX.h:1225
base_class_range bases()
Definition DeclCXX.h:608
bool isDynamicClass() const
Definition DeclCXX.h:574
bool hasDefinition() const
Definition DeclCXX.h:561
const CXXRecordDecl * getStandardLayoutBaseWithFields() const
If this is a standard-layout class or union, any and all data members will be declared in the same ty...
Definition DeclCXX.cpp:559
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:848
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition ExprCXX.h:1069
MSGuidDecl * getGuidDecl() const
Definition ExprCXX.h:1115
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
SourceLocation getBeginLoc() const
Definition Expr.h:3211
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3060
Expr * getCallee()
Definition Expr.h:3024
bool isCoroElideSafe() const
Definition Expr.h:3051
arg_range arguments()
Definition Expr.h:3129
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3610
path_iterator path_begin()
Definition Expr.h:3680
CastKind getCastKind() const
Definition Expr.h:3654
bool changesVolatileQualification() const
Return.
Definition Expr.h:3744
path_iterator path_end()
Definition Expr.h:3681
Expr * getSubExpr()
Definition Expr.h:3660
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
llvm::MaybeAlign getAsMaybeAlign() const
getAsMaybeAlign - Returns Quantity as a valid llvm::Align or std::nullopt, Beware llvm::MaybeAlign as...
Definition CharUnits.h:194
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
@ None
Trap Messages are omitted.
@ Detailed
Trap Message includes more context (e.g.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * getBasePointer() const
Definition Address.h:198
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition Address.h:261
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
An aggregate value slot.
Definition CGValue.h:504
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition CGValue.h:572
Address getAddress() const
Definition CGValue.h:644
void setExternallyDestructed(bool destructed=true)
Definition CGValue.h:613
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition CGValue.h:602
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:587
RValue asRValue() const
Definition CGValue.h:666
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition CGBuilder.h:309
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition CGBuilder.h:296
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition CGBuilder.h:335
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition CGBuilder.h:245
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition CGBuilder.h:223
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Definition CGBuilder.h:319
Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition CGBuilder.h:417
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition CGBuilder.h:193
virtual llvm::Function * getKernelStub(llvm::GlobalValue *Handle)=0
Get kernel stub by kernel handle.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
Abstract information about a function or function prototype.
Definition CGCall.h:41
const GlobalDecl getCalleeDecl() const
Definition CGCall.h:59
All available information about a concrete callee.
Definition CGCall.h:63
CGCalleeInfo getAbstractInfo() const
Definition CGCall.h:180
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CGCall.h:172
bool isPseudoDestructor() const
Definition CGCall.h:169
static CGCallee forBuiltin(unsigned builtinID, const FunctionDecl *builtinDecl)
Definition CGCall.h:123
unsigned getBuiltinID() const
Definition CGCall.h:164
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
bool isBuiltin() const
Definition CGCall.h:157
const FunctionDecl * getBuiltinDecl() const
Definition CGCall.h:160
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E)
Definition CGCall.h:131
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition CGDebugInfo.h:59
llvm::DIType * getOrCreateStandaloneType(QualType Ty, SourceLocation Loc)
Emit standalone debug info for a type.
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
llvm::DIType * getOrCreateRecordType(QualType Ty, SourceLocation L)
Emit record type's standalone debug info.
CGFunctionInfo - Class to encapsulate the information about a function definition.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
bool containsFieldDecl(const FieldDecl *FD) const
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr)
Definition CGCall.h:320
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
An object to manage conditionally-evaluated expressions.
llvm::BasicBlock * getStartingBlock() const
Returns a block which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
LValue getReferenceLValue(CodeGenFunction &CGF, const Expr *RefExpr) const
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
Definition CGExpr.cpp:4776
LValue EmitCoawaitLValue(const CoawaitExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2752
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2152
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
Definition CGExpr.cpp:3030
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:573
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
Definition CGExpr.cpp:6277
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
Definition CGExpr.cpp:5594
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3679
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
Definition CGExpr.cpp:1276
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6678
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue EmitInitListLValue(const InitListExpr *E)
Definition CGExpr.cpp:5478
bool isUnderlyingBasePointerConstantNull(const Expr *E)
Check whether the underlying base pointer is a constant null.
Definition CGExpr.cpp:5060
void EmitARCInitWeak(Address addr, llvm::Value *value)
i8* @objc_initWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2663
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:181
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
Definition CGExpr.cpp:4562
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
Definition CGExpr.cpp:6309
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1185
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
llvm::Type * ConvertType(QualType T)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
Definition CGExpr.cpp:6290
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
CGCapturedStmtInfo * CapturedStmtInfo
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2328
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
Definition CGClass.cpp:286
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
Definition CGExpr.cpp:2759
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Definition CGExpr.cpp:3496
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
Definition CGExpr.cpp:5451
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
Definition CGExpr.cpp:6631
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:2680
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3648
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
Definition CGExpr.cpp:6295
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6657
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
Emit a CallExpr without considering whether it might be a subclass.
Definition CGExpr.cpp:5981
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
Definition CGExpr.cpp:721
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1236
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:6778
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
Definition CGExpr.cpp:5170
const LangOptions & getLangOpts() const
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
Definition CGExpr.cpp:4019
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:684
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2545
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
Definition CGExpr.cpp:6608
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
Definition CGExpr.cpp:6343
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:394
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
Definition CGExpr.cpp:5576
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
Definition CGDecl.cpp:2278
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
Definition CGDecl.cpp:787
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:765
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
Definition CGExpr.cpp:3039
RValue EmitLoadOfGlobalRegLValue(LValue LV)
Load of global named registers are always calls to intrinsics.
Definition CGExpr.cpp:2509
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2890
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Upcast
Checking the operand of a cast to a base object.
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
Definition CGExpr.cpp:6132
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
Definition CGDecl.cpp:2251
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty, SourceLocation Loc)
Definition CGExpr.cpp:2012
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
Definition CGExpr.cpp:6783
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3538
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, llvm::Value *Index, QualType IndexType, QualType IndexedType, bool Accessed)
Definition CGExpr.cpp:1250
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
Definition CGExpr.cpp:5830
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
Definition CGExpr.cpp:963
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ Default
! No language constraints on evaluation order.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
Definition CGExpr.cpp:6335
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
Definition CGExpr.cpp:2318
LValue EmitLValueForField(LValue Base, const FieldDecl *Field, bool IsInBounds=true)
Definition CGExpr.cpp:5252
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:174
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, bool IsInBounds, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Emit the address of a field using a member data pointer.
Definition CGClass.cpp:150
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
Definition CGExpr.cpp:5853
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
Definition CGExpr.cpp:726
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6057
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:242
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:5931
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2336
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
Definition CGExpr.cpp:4835
Address GetAddrOfBlockDecl(const VarDecl *var)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
Definition CGExpr.cpp:3980
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
Definition CGExpr.cpp:6646
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:223
LValue EmitPredefinedLValue(const PredefinedExpr *E)
Definition CGExpr.cpp:3501
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:3788
LValue EmitDeclRefLValue(const DeclRefExpr *E)
Definition CGExpr.cpp:3205
LValue EmitStringLiteralLValue(const StringLiteral *E)
Definition CGExpr.cpp:3491
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:5884
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:1967
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
Definition CGExpr.cpp:1564
LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Same as MakeAddrLValue above except that the pointer is known to be unsigned.
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5426
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2153
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition CGExpr.cpp:151
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:5870
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
Definition CGExpr.cpp:2026
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5215
const TargetCodeGenInfo & getTargetHooks() const
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E)
Definition CGExpr.cpp:6247
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
Definition CGExpr.cpp:215
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
LValue EmitVAArgExprLValue(const VAArgExpr *E)
Definition CGExpr.cpp:6272
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside a preserved access index region.
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
Definition CGExpr.cpp:283
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
LValue EmitStmtExprLValue(const StmtExpr *E)
Definition CGExpr.cpp:6375
llvm::Value * EmitARCLoadWeakRetained(Address addr)
i8* @objc_loadWeakRetained(i8** addr)
Definition CGObjC.cpp:2643
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block.
Definition CGExpr.cpp:103
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
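A hedged sketch of how the two complex-number helpers above compose (E and DestLV are hypothetical locals, not names from this file):
  // Sketch only: evaluate a _Complex-typed expression and store the pair.
  ComplexPairTy V = EmitComplexExpr(E);             // {real, imag} as two llvm::Value*
  EmitStoreOfComplex(V, DestLV, /*isInit=*/false);  // plain assignment, not initialization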
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
Definition CGExpr.cpp:6351
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2533
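A minimal sketch of the store path, assuming a previously computed LValue Dst and a scalar llvm::Value *NewVal of the matching type (both hypothetical):
  // Sketch only: wrap the scalar in an RValue and store it through the l-value.
  EmitStoreThroughLValue(RValue::get(NewVal), Dst, /*isInit=*/false);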
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4228
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2331
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1532
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
LValue EmitCastLValue(const CastExpr *E)
EmitCastLValue - Casts are never lvalues unless that cast is to a reference type.
Definition CGExpr.cpp:5644
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
Definition CGExpr.cpp:508
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
Definition CGExpr.cpp:3049
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3610
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition CGExpr.cpp:293
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:264
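A hedged sketch of how a caller might consume the result of EmitAnyExpr (E is a hypothetical expression; isAggregate() is RValue's companion predicate from CGValue.h):
  // Sketch only: branch on the shape of the returned RValue.
  RValue RV = EmitAnyExpr(E);
  if (RV.isScalar()) {
    llvm::Value *V = RV.getScalarVal();    // single SSA value
    (void)V;
  } else if (RV.isAggregate()) {
    Address A = RV.getAggregateAddress();  // result already lives in memory
    (void)A;
  }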
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
Definition CGExpr.cpp:4999
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
Definition CGExpr.cpp:1558
CleanupKind getCleanupKind(QualType::DestructionKind kind)
std::pair< LValue, LValue > EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty)
Definition CGExpr.cpp:5836
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Definition CGExpr.cpp:6323
llvm::Type * ConvertTypeForMem(QualType T)
LValue EmitCallExprLValue(const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6257
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
Definition CGExpr.cpp:2407
llvm::Value * EmitARCLoadWeak(Address addr)
i8* @objc_loadWeak(i8** addr) Essentially objc_autorelease(objc_loadWeakRetained(addr)).
Definition CGObjC.cpp:2636
LValue EmitLValueForLambdaField(const FieldDecl *Field)
Definition CGExpr.cpp:5164
void markStmtMaybeUsed(const Stmt *S)
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
Definition CGExpr.cpp:6329
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue EmitCoyieldLValue(const CoyieldExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond is false.
Definition CGExpr.cpp:3932
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
Emits all the code to cause the given temporary to be cleaned up.
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
Definition CGExpr.cpp:3424
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1515
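A hedged sketch combining the entry above with MakeAddrLValue (listed later in this index): given a hypothetical pointer-typed expression PtrE, emit the pointer with its best-known alignment and wrap the pointee as an l-value.
  // Sketch only.
  Address Addr = EmitPointerWithAlignment(PtrE);
  QualType PointeeTy = PtrE->getType()->getPointeeType();
  LValue LV = MakeAddrLValue(Addr, PointeeTy);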
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1596
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:186
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
Definition CGExpr.cpp:2997
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
Definition CGExpr.cpp:5903
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
Definition CGObjC.cpp:2160
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
Definition CGExpr.cpp:6301
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:734
Address EmitExtVectorElementLValue(LValue V)
Generates lvalue for partial ext_vector access.
Definition CGExpr.cpp:2491
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Definition CGExpr.cpp:323
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
void FlattenAccessAndType(Address Addr, QualType AddrTy, SmallVectorImpl< std::pair< Address, llvm::Value * > > &AccessList, SmallVectorImpl< QualType > &FlatTypes)
Definition CGExpr.cpp:6787
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
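A hedged sketch that spills a scalar result into a fresh temporary, combining EmitScalarExpr with the CreateMemTemp and EmitStoreOfScalar entries from this index (E is a hypothetical scalar expression):
  // Sketch only.
  llvm::Value *V = EmitScalarExpr(E);
  RawAddress Tmp = CreateMemTemp(E->getType(), "spill");        // aligned slot of E's type
  EmitStoreOfScalar(V, Tmp, /*Volatile=*/false, E->getType());  // convert to memory form and store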
RValue EmitLoadOfExtVectorElementLValue(LValue V)
Definition CGExpr.cpp:2444
static bool hasAggregateEvaluationKind(QualType T)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
Definition CGExpr.cpp:1573
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
Definition CGCall.cpp:4655
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:4768
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4213
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4140
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2183
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1228
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4128
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
Definition CGExpr.cpp:6286
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
generateDestroyHelper - Generates a helper function which, when invoked, destroys the given object.
LValue EmitMemberExpr(const MemberExpr *E)
Definition CGExpr.cpp:5067
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1864
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1631
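A hedged sketch of the common load path built from entries in this index: compute the l-value, then read it with the Address-based EmitLoadOfScalar overload, pulling the address, volatility, and type off the LValue (E is a hypothetical scalar glvalue expression):
  // Sketch only.
  LValue LV = EmitLValue(E);
  llvm::Value *Loaded = EmitLoadOfScalar(LV.getAddress(), LV.isVolatileQualified(),
                                         LV.getType(), E->getExprLoc());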
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Stores of global named registers are always calls to intrinsics.
Definition CGExpr.cpp:2839
bool isOpaqueValueEmitted(const OpaqueValueExpr *E)
isOpaqueValueEmitted - Return true if the opaque value expression has already been emitted.
Definition CGExpr.cpp:5897
std::pair< llvm::Value *, CGPointerAuthInfo > EmitOrigPointerRValue(const Expr *E)
Retrieve a pointer rvalue and its ptrauth info.
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
Definition CGExpr.cpp:706
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
Definition CGExpr.cpp:1525
void EmitCountedByBoundsChecking(const Expr *E, llvm::Value *Idx, Address Addr, QualType IdxTy, QualType ArrayTy, bool Accessed, bool FlexibleArray)
EmitCountedByBoundsChecking - If the array being accessed has a "counted_by" attribute,...
Definition CGExpr.cpp:4515
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:652
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if the type of an expression is a variably modified type.
Definition CGExpr.cpp:1311
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getRawFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return a function pointer for a reference to the given function.
Definition CGExpr.cpp:3096
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition CGCXX.cpp:217
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
llvm::Constant * getFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return the ABI-correct function pointer value for a reference to the given function.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type)
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of a constant literal, which is used to emit the constant literal as globa...
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
A specialization of Address that requires the address to be an LLVM Constant.
Definition Address.h:296
llvm::Constant * getPointer() const
Definition Address.h:308
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
AlignmentSource getAlignmentSource() const
Definition CGValue.h:171
LValue - This represents an lvalue reference.
Definition CGValue.h:182
bool isBitField() const
Definition CGValue.h:280
bool isMatrixElt() const
Definition CGValue.h:283
Expr * getBaseIvarExp() const
Definition CGValue.h:332
llvm::Constant * getExtVectorElts() const
Definition CGValue.h:409
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition CGValue.h:478
bool isObjCStrong() const
Definition CGValue.h:324
bool isGlobalObjCRef() const
Definition CGValue.h:306
bool isVectorElt() const
Definition CGValue.h:279
bool isSimple() const
Definition CGValue.h:278
bool isVolatileQualified() const
Definition CGValue.h:285
RValue asAggregateRValue() const
Definition CGValue.h:498
llvm::Value * getPointer(CodeGenFunction &CGF) const
llvm::Value * getMatrixIdx() const
Definition CGValue.h:395
llvm::Value * getGlobalReg() const
Definition CGValue.h:430
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:432
bool isVolatile() const
Definition CGValue.h:328
const Qualifiers & getQuals() const
Definition CGValue.h:338
bool isGlobalReg() const
Definition CGValue.h:282
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:452
bool isObjCWeak() const
Definition CGValue.h:321
Address getAddress() const
Definition CGValue.h:361
unsigned getVRQualifiers() const
Definition CGValue.h:287
LValue setKnownNonNull()
Definition CGValue.h:350
bool isNonGC() const
Definition CGValue.h:303
bool isExtVectorElt() const
Definition CGValue.h:281
llvm::Value * getVectorIdx() const
Definition CGValue.h:382
void setNontemporal(bool Value)
Definition CGValue.h:319
LValueBaseInfo getBaseInfo() const
Definition CGValue.h:346
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition CGValue.h:315
QualType getType() const
Definition CGValue.h:291
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:424
bool isThreadLocalRef() const
Definition CGValue.h:309
KnownNonNull_t isKnownNonNull() const
Definition CGValue.h:349
TBAAAccessInfo getTBAAInfo() const
Definition CGValue.h:335
void setNonGC(bool Value)
Definition CGValue.h:304
Address getVectorAddress() const
Definition CGValue.h:370
bool isNontemporal() const
Definition CGValue.h:318
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition CGValue.h:468
bool isObjCIvar() const
Definition CGValue.h:297
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:442
void setAddress(Address address)
Definition CGValue.h:363
Address getExtVectorAddress() const
Definition CGValue.h:401
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:488
Address getMatrixAddress() const
Definition CGValue.h:387
Address getBitFieldAddress() const
Definition CGValue.h:415
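A hedged sketch of the kind dispatch that the predicates above support; the branch bodies are comments only, and LV is a hypothetical LValue:
  // Sketch only: decide how to access an l-value based on its kind.
  if (LV.isSimple()) {
    // plain memory l-value: LV.getAddress() is directly loadable
  } else if (LV.isVectorElt()) {
    // one element of an in-memory vector: LV.getVectorAddress() + LV.getVectorIdx()
  } else if (LV.isExtVectorElt()) {
    // ext_vector swizzle: LV.getExtVectorAddress() + LV.getExtVectorElts()
  } else if (LV.isBitField()) {
    // packed field: LV.getBitFieldAddress() + LV.getBitFieldInfo()
  } else if (LV.isGlobalReg()) {
    // named register: LV.getGlobalReg(), accessed through intrinsics
  }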
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:98
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:125
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:108
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:71
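A hedged sketch of the three RValue shapes built with the static factories above (V, Re, Im, and the aggregate Address Slot are hypothetical locals):
  // Sketch only.
  RValue Scalar    = RValue::get(V);               // one SSA value
  RValue Complex   = RValue::getComplex(Re, Im);   // {real, imag} pair
  RValue Aggregate = RValue::getAggregate(Slot);   // result already in memory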
An abstract representation of an aligned address.
Definition Address.h:42
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition Address.h:93
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:77
llvm::Value * getPointer() const
Definition Address.h:66
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition Address.h:83
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3275
QualType getElementType() const
Definition TypeBase.h:3285
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3539
bool isFileScope() const
Definition Expr.h:3571
const Expr * getInitializer() const
Definition Expr.h:3567
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4371
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition TypeBase.h:4389
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1474
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:484
ValueDecl * getDecl()
Definition Expr.h:1338
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1468
SourceLocation getLocation() const
Definition Expr.h:1346
T * getAttr() const
Definition DeclBase.h:573
SourceLocation getLocation() const
Definition DeclBase.h:439
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition DeclBase.cpp:553
DeclContext * getDeclContext()
Definition DeclBase.h:448
bool hasAttr() const
Definition DeclBase.h:577
ExplicitCastExpr - An explicit cast written in the source code.
Definition Expr.h:3862
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:80
bool isGLValue() const
Definition Expr.h:287
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3100
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:444
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3073
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3061
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known ...
bool isPRValue() const
Definition Expr.h:285
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition Expr.h:284
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1542
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3624
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3053
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:476
QualType getType() const
Definition Expr.h:144
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/write barrier.
Definition Expr.cpp:2984
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6498
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4371
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4403
const Expr * getBase() const
Definition Expr.h:6515
Represents a member of a struct/union/class.
Definition Decl.h:3157
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3260
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3242
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3393
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4796
FullExpr - Represents a "full-expression" node.
Definition Expr.h:1049
const Expr * getSubExpr() const
Definition Expr.h:1062
Represents a function declaration or definition.
Definition Decl.h:1999
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3703
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5264
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4460
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition Expr.h:7256
const OpaqueValueExpr * getCastedTemporary() const
Definition Expr.h:7307
const OpaqueValueExpr * getOpaqueArgLValue() const
Definition Expr.h:7288
bool isInOut() const
Returns true if the parameter is inout and false if the parameter is out.
Definition Expr.h:7315
const Expr * getWritebackCast() const
Definition Expr.h:7302
const Expr * getArgLValue() const
Return the l-value expression that was written as the argument in source.
Definition Expr.h:7297
Describes a C or C++ initializer list.
Definition Expr.h:5233
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2457
const Expr * getInit(unsigned Init) const
Definition Expr.h:5287
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4914
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4939
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4931
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4964
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition Expr.h:2799
bool isIncomplete() const
Definition Expr.h:2819
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3298
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3381
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3522
Expr * getBase() const
Definition Expr.h:3375
bool isArrow() const
Definition Expr.h:3482
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3493
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3651
bool isObjCBOOLType(QualType T) const
Returns true if T is a typedef of BOOL in Objective-C.
Definition NSAPI.cpp:481
This represents a decl that may have a name.
Definition Decl.h:273
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:300
A C++ nested-name-specifier augmented with source location information.
ObjCEncodeExpr, used for @encode in Objective-C.
Definition ExprObjC.h:409
Represents an ObjC class declaration.
Definition DeclObjC.h:1154
ObjCIvarDecl - Represents an ObjC instance variable.
Definition DeclObjC.h:1952
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition ExprObjC.h:548
ObjCIvarDecl * getDecl()
Definition ExprObjC.h:578
bool isArrow() const
Definition ExprObjC.h:586
const Expr * getBase() const
Definition ExprObjC.h:582
An expression that sends a message to the given Objective-C object or class.
Definition ExprObjC.h:940
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1364
QualType getReturnType() const
Definition DeclObjC.h:329
ObjCSelectorExpr used for @selector in Objective-C.
Definition ExprObjC.h:454
Selector getSelector() const
Definition ExprObjC.h:468
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1228
bool isUnique() const
Definition Expr.h:1236
const Expr * getSubExpr() const
Definition Expr.h:2199
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
QualType getPointeeType() const
Definition TypeBase.h:3338
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition Expr.h:2005
StringRef getIdentKindName() const
Definition Expr.h:2062
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2040
StringLiteral * getFunctionName()
Definition Expr.h:2049
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition Expr.h:6690
semantics_iterator semantics_end()
Definition Expr.h:6755
semantics_iterator semantics_begin()
Definition Expr.h:6751
const Expr *const * const_semantics_iterator
Definition Expr.h:6750
Expr * getResultExpr()
Return the result-bearing expression, or null if there is none.
Definition Expr.h:6738
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8369
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1453
QualType withoutLocalFastQualifiers() const
Definition TypeBase.h:1214
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8411
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8325
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8470
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition TypeBase.h:8379
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1179
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition TypeBase.h:1036
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
bool hasConst() const
Definition TypeBase.h:457
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void removeObjCGCAttr()
Definition TypeBase.h:523
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
void removePointerAuth()
Definition TypeBase.h:610
void setAddressSpace(LangAS space)
Definition TypeBase.h:591
bool hasVolatile() const
Definition TypeBase.h:467
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:603
ObjCLifetime getObjCLifetime() const
Definition TypeBase.h:545
Represents a struct/union/class.
Definition Decl.h:4309
field_range fields() const
Definition Decl.h:4512
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition Decl.h:4493
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition Expr.h:4529
StmtClass getStmtClass() const
Definition Stmt.h:1472
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:346
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1799
bool isUnion() const
Definition Decl.h:3919
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
virtual StringRef getABI() const
Get the ABI currently in use.
The base class of the type hierarchy.
Definition TypeBase.h:1833
bool isBlockPointerType() const
Definition TypeBase.h:8542
bool isVoidType() const
Definition TypeBase.h:8878
bool hasPointeeToToCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8574
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isPackedVectorBoolType(const ASTContext &ctx) const
Definition Type.cpp:418
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition Type.cpp:1951
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9174
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8625
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isArrayType() const
Definition TypeBase.h:8621
bool isFunctionPointerType() const
Definition TypeBase.h:8589
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2337
bool isConstantMatrixType() const
Definition TypeBase.h:8683
bool isPointerType() const
Definition TypeBase.h:8522
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8922
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9165
bool isReferenceType() const
Definition TypeBase.h:8546
bool isEnumeralType() const
Definition TypeBase.h:8653
bool isVariableArrayType() const
Definition TypeBase.h:8633
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isExtVectorBoolType() const
Definition TypeBase.h:8669
bool isBitIntType() const
Definition TypeBase.h:8787
bool isAnyComplexType() const
Definition TypeBase.h:8657
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9051
bool isAtomicType() const
Definition TypeBase.h:8704
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2800
bool isObjectType() const
Determine whether this type is an object type.
Definition TypeBase.h:2510
bool isHLSLResourceRecord() const
Definition Type.cpp:5370
EnumDecl * getAsEnumDecl() const
Retrieves the EnumDecl this type refers to.
Definition Type.h:53
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2436
bool isFunctionType() const
Definition TypeBase.h:8518
bool isObjCObjectPointerType() const
Definition TypeBase.h:8691
bool isVectorType() const
Definition TypeBase.h:8661
bool isAnyPointerType() const
Definition TypeBase.h:8530
bool isSubscriptableVectorType() const
Definition TypeBase.h:8675
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9098
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition Type.cpp:653
bool isRecordType() const
Definition TypeBase.h:8649
bool isHLSLResourceRecordArray() const
Definition Type.cpp:5374
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2354
bool isCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8568
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4891
Represent the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition Decl.h:711
QualType getType() const
Definition Decl.h:722
QualType getType() const
Definition Value.cpp:237
Represents a variable declaration or definition.
Definition Decl.h:925
TLSKind getTLSKind() const
Definition Decl.cpp:2168
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2366
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1183
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:951
@ TLS_None
Not a TLS variable.
Definition Decl.h:945
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3964
Represents a GCC generic vector type.
Definition TypeBase.h:4173
unsigned getNumElements() const
Definition TypeBase.h:4188
#define INT_MIN
Definition limits.h:55
Definition SPIR.cpp:35
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition CGValue.h:141
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:154
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:145
bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD)
isEmptyFieldForLayout - Return true iff the field is "empty", that is, either a zero-width bit-field ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
@ ARCImpreciseLifetime
Definition CGValue.h:136
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition CGValue.h:159
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition Specifiers.h:154
@ SC_Register
Definition Specifiers.h:257
Expr * Cond
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition Specifiers.h:339
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:342
@ SD_Static
Static storage duration.
Definition Specifiers.h:343
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:340
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:341
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:344
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
LangAS
Defines the address space values used by the address space qualifier of QualType.
llvm::cl::opt< bool > ClSanitizeGuardChecks
SmallVector< CXXBaseSpecifier *, 4 > CXXCastPath
A simple array of base specifiers.
Definition ASTContext.h:117
U cast(CodeGen::Address addr)
Definition Address.h:327
LangAS getLangASFromTargetAS(unsigned TargetAS)
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
Definition TypeBase.h:5868
bool isLambdaMethod(const DeclContext *DC)
Definition ASTLambda.h:39
@ Other
Other implicit parameter.
Definition Decl.h:1745
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:180
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
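As an illustration only (plain arithmetic over the fields described above, not CodeGen's actual lowering), a bit-field value can be recovered from its loaded storage unit using Offset, Size, and IsSigned; 64-bit storage is assumed for simplicity.
  #include <cstdint>
  uint64_t extractBitField(uint64_t Storage, unsigned Offset, unsigned Size,
                           bool IsSigned) {
    uint64_t Mask = (Size < 64) ? ((1ULL << Size) - 1) : ~0ULL;
    uint64_t Val = (Storage >> Offset) & Mask;   // isolate the Size bits at Offset
    if (IsSigned && Size < 64 && ((Val >> (Size - 1)) & 1))
      Val |= ~Mask;                              // sign-extend from the field's top bit
    return Val;
  }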
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
uint64_t Offset
Offset - The byte offset of the final access within the base one.
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
uint64_t Size
Size - The size of access, in bytes.
llvm::MDNode * BaseType
BaseType - The base/leading access type.
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:612
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition Expr.h:68